diff --git itests/src/test/resources/testconfiguration.properties itests/src/test/resources/testconfiguration.properties index 61a1941..b937d63 100644 --- itests/src/test/resources/testconfiguration.properties +++ itests/src/test/resources/testconfiguration.properties @@ -534,6 +534,11 @@ spark.query.files=add_part_multiple.q, \ bucketmapjoin_negative.q, \ bucketmapjoin_negative2.q, \ bucketmapjoin_negative3.q, \ + bucketsortoptimize_insert_2.q, \ + bucketsortoptimize_insert_4.q, \ + bucketsortoptimize_insert_6.q, \ + bucketsortoptimize_insert_7.q, \ + bucketsortoptimize_insert_8.q, \ bucket_map_join_1.q, \ bucket_map_join_2.q, \ bucket_map_join_spark1.q \ diff --git ql/src/test/results/clientpositive/spark/bucketsortoptimize_insert_2.q.out ql/src/test/results/clientpositive/spark/bucketsortoptimize_insert_2.q.out new file mode 100644 index 0000000..b58091c --- /dev/null +++ ql/src/test/results/clientpositive/spark/bucketsortoptimize_insert_2.q.out @@ -0,0 +1,994 @@ +PREHOOK: query: -- Create two bucketed and sorted tables +CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING) +CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@test_table1 +POSTHOOK: query: -- Create two bucketed and sorted tables +CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING) +CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@test_table1 +PREHOOK: query: CREATE TABLE test_table2 (key INT, value STRING) PARTITIONED BY (ds STRING) +CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@test_table2 +POSTHOOK: query: CREATE TABLE test_table2 (key INT, value STRING) PARTITIONED BY (ds STRING) +CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@test_table2 +PREHOOK: query: CREATE TABLE test_table3 (key INT, value STRING) PARTITIONED BY (ds STRING) +CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@test_table3 +POSTHOOK: query: CREATE TABLE test_table3 (key INT, value STRING) PARTITIONED BY (ds STRING) +CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@test_table3 +PREHOOK: query: FROM src +INSERT OVERWRITE TABLE test_table1 PARTITION (ds = '1') SELECT * where key < 10 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@test_table1@ds=1 +POSTHOOK: query: FROM src +INSERT OVERWRITE TABLE test_table1 PARTITION (ds = '1') SELECT * where key < 10 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@test_table1@ds=1 +POSTHOOK: Lineage: test_table1 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: test_table1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: FROM src +INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') SELECT * where key < 100 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@test_table2@ds=1 +POSTHOOK: query: FROM src +INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') SELECT * where key < 100 +POSTHOOK: type: QUERY +POSTHOOK: 
Input: default@src +POSTHOOK: Output: default@test_table2@ds=1 +POSTHOOK: Lineage: test_table2 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: test_table2 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: FROM src +INSERT OVERWRITE TABLE test_table1 PARTITION (ds = '2') SELECT * where key < 10 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@test_table1@ds=2 +POSTHOOK: query: FROM src +INSERT OVERWRITE TABLE test_table1 PARTITION (ds = '2') SELECT * where key < 10 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@test_table1@ds=2 +POSTHOOK: Lineage: test_table1 PARTITION(ds=2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: test_table1 PARTITION(ds=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: FROM src +INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '2') SELECT * where key < 100 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@test_table2@ds=2 +POSTHOOK: query: FROM src +INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '2') SELECT * where key < 100 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@test_table2@ds=2 +POSTHOOK: Lineage: test_table2 PARTITION(ds=2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: test_table2 PARTITION(ds=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table +-- This should be a map-only operation +EXPLAIN +INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') +SELECT a.key, concat(a.value, b.value) +FROM test_table1 a JOIN test_table2 b +ON a.key = b.key WHERE a.ds = '1' and b.ds = '1' +PREHOOK: type: QUERY +POSTHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table +-- This should be a map-only operation +EXPLAIN +INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') +SELECT a.key, concat(a.value, b.value) +FROM test_table1 a JOIN test_table2 b +ON a.key = b.key WHERE a.ds = '1' and b.ds = '1' +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + Stage-2 depends on stages: Stage-0 + +STAGE PLANS: + Stage: Stage-1 + Spark +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: a + Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: key is not null (type: boolean) + Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE + Sorted Merge Bucket Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 {key} {value} + 1 {value} + keys: + 0 key (type: int) + 1 key (type: int) + outputColumnNames: _col0, _col1, _col7 + Select Operator + expressions: _col0 (type: int), concat(_col1, _col7) (type: string) + outputColumnNames: _col0, _col1 + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.test_table3 + + Stage: Stage-0 + Move Operator + tables: + partition: + ds 1 + replace: true + table: + 
input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.test_table3 + + Stage: Stage-2 + Stats-Aggr Operator + +PREHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') +SELECT a.key, concat(a.value, b.value) +FROM test_table1 a JOIN test_table2 b +ON a.key = b.key WHERE a.ds = '1' and b.ds = '1' +PREHOOK: type: QUERY +PREHOOK: Input: default@test_table1 +PREHOOK: Input: default@test_table1@ds=1 +PREHOOK: Input: default@test_table2 +PREHOOK: Input: default@test_table2@ds=1 +PREHOOK: Output: default@test_table3@ds=1 +POSTHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') +SELECT a.key, concat(a.value, b.value) +FROM test_table1 a JOIN test_table2 b +ON a.key = b.key WHERE a.ds = '1' and b.ds = '1' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@test_table1 +POSTHOOK: Input: default@test_table1@ds=1 +POSTHOOK: Input: default@test_table2 +POSTHOOK: Input: default@test_table2@ds=1 +POSTHOOK: Output: default@test_table3@ds=1 +POSTHOOK: Lineage: test_table3 PARTITION(ds=1).key SIMPLE [(test_table1)a.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: test_table3 PARTITION(ds=1).value EXPRESSION [(test_table1)a.FieldSchema(name:value, type:string, comment:null), (test_table2)b.FieldSchema(name:value, type:string, comment:null), ] +PREHOOK: query: select * from test_table3 tablesample (bucket 1 out of 2) s where ds = '1' +PREHOOK: type: QUERY +PREHOOK: Input: default@test_table3 +PREHOOK: Input: default@test_table3@ds=1 +#### A masked pattern was here #### +POSTHOOK: query: select * from test_table3 tablesample (bucket 1 out of 2) s where ds = '1' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@test_table3 +POSTHOOK: Input: default@test_table3@ds=1 +#### A masked pattern was here #### +0 val_0val_0 1 +0 val_0val_0 1 +0 val_0val_0 1 +0 val_0val_0 1 +0 val_0val_0 1 +0 val_0val_0 1 +0 val_0val_0 1 +0 val_0val_0 1 +0 val_0val_0 1 +2 val_2val_2 1 +4 val_4val_4 1 +8 val_8val_8 1 +PREHOOK: query: select * from test_table3 tablesample (bucket 2 out of 2) s where ds = '1' +PREHOOK: type: QUERY +PREHOOK: Input: default@test_table3 +PREHOOK: Input: default@test_table3@ds=1 +#### A masked pattern was here #### +POSTHOOK: query: select * from test_table3 tablesample (bucket 2 out of 2) s where ds = '1' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@test_table3 +POSTHOOK: Input: default@test_table3@ds=1 +#### A masked pattern was here #### +5 val_5val_5 1 +5 val_5val_5 1 +5 val_5val_5 1 +5 val_5val_5 1 +5 val_5val_5 1 +5 val_5val_5 1 +5 val_5val_5 1 +5 val_5val_5 1 +5 val_5val_5 1 +9 val_9val_9 1 +PREHOOK: query: -- Since more than one partition of 'a' (the big table) is being selected, +-- it should be a map-reduce job +EXPLAIN +INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') +SELECT a.key, concat(a.value, b.value) +FROM test_table1 a JOIN test_table2 b +ON a.key = b.key WHERE a.ds is not null and b.ds = '1' +PREHOOK: type: QUERY +POSTHOOK: query: -- Since more than one partition of 'a' (the big table) is being selected, +-- it should be a map-reduce job +EXPLAIN +INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') +SELECT a.key, concat(a.value, b.value) +FROM test_table1 a JOIN test_table2 b +ON a.key = b.key WHERE a.ds is not null and b.ds = '1' +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + Stage-2 depends on stages: Stage-0 + +STAGE 
PLANS: + Stage: Stage-1 + Spark + Edges: + Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 1) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: a + Statistics: Num rows: 20 Data size: 140 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: key is not null (type: boolean) + Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE + Sorted Merge Bucket Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 {key} {value} + 1 {value} + keys: + 0 key (type: int) + 1 key (type: int) + outputColumnNames: _col0, _col1, _col7 + Select Operator + expressions: _col0 (type: int), concat(_col1, _col7) (type: string) + outputColumnNames: _col0, _col1 + Reduce Output Operator + key expressions: _col0 (type: int) + sort order: + + Map-reduce partition columns: _col0 (type: int) + value expressions: _col0 (type: int), _col1 (type: string) + Reducer 2 + Reduce Operator Tree: + Extract + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.test_table3 + + Stage: Stage-0 + Move Operator + tables: + partition: + ds 1 + replace: true + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.test_table3 + + Stage: Stage-2 + Stats-Aggr Operator + +PREHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') +SELECT a.key, concat(a.value, b.value) +FROM test_table1 a JOIN test_table2 b +ON a.key = b.key WHERE a.ds is not null and b.ds = '1' +PREHOOK: type: QUERY +PREHOOK: Input: default@test_table1 +PREHOOK: Input: default@test_table1@ds=1 +PREHOOK: Input: default@test_table1@ds=2 +PREHOOK: Input: default@test_table2 +PREHOOK: Input: default@test_table2@ds=1 +PREHOOK: Output: default@test_table3@ds=1 +POSTHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') +SELECT a.key, concat(a.value, b.value) +FROM test_table1 a JOIN test_table2 b +ON a.key = b.key WHERE a.ds is not null and b.ds = '1' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@test_table1 +POSTHOOK: Input: default@test_table1@ds=1 +POSTHOOK: Input: default@test_table1@ds=2 +POSTHOOK: Input: default@test_table2 +POSTHOOK: Input: default@test_table2@ds=1 +POSTHOOK: Output: default@test_table3@ds=1 +POSTHOOK: Lineage: test_table3 PARTITION(ds=1).key SIMPLE [(test_table1)a.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: test_table3 PARTITION(ds=1).value EXPRESSION [(test_table1)a.FieldSchema(name:value, type:string, comment:null), (test_table2)b.FieldSchema(name:value, type:string, comment:null), ] +PREHOOK: query: select * from test_table3 tablesample (bucket 1 out of 2) s where ds = '1' +PREHOOK: type: QUERY +PREHOOK: Input: default@test_table3 +PREHOOK: Input: default@test_table3@ds=1 +#### A masked pattern was here #### +POSTHOOK: query: select * from test_table3 tablesample (bucket 1 out of 2) s where ds = '1' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@test_table3 +POSTHOOK: Input: default@test_table3@ds=1 +#### A masked pattern was here #### +0 val_0val_0 1 +0 val_0val_0 1 +0 val_0val_0 1 +0 val_0val_0 1 +0 val_0val_0 1 +0 val_0val_0 1 +0 val_0val_0 1 +0 val_0val_0 1 +0 val_0val_0 1 +0 val_0val_0 1 +0 val_0val_0 1 +0 
val_0val_0 1 +0 val_0val_0 1 +0 val_0val_0 1 +0 val_0val_0 1 +0 val_0val_0 1 +0 val_0val_0 1 +0 val_0val_0 1 +2 val_2val_2 1 +2 val_2val_2 1 +4 val_4val_4 1 +4 val_4val_4 1 +8 val_8val_8 1 +8 val_8val_8 1 +PREHOOK: query: select * from test_table3 tablesample (bucket 2 out of 2) s where ds = '1' +PREHOOK: type: QUERY +PREHOOK: Input: default@test_table3 +PREHOOK: Input: default@test_table3@ds=1 +#### A masked pattern was here #### +POSTHOOK: query: select * from test_table3 tablesample (bucket 2 out of 2) s where ds = '1' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@test_table3 +POSTHOOK: Input: default@test_table3@ds=1 +#### A masked pattern was here #### +5 val_5val_5 1 +5 val_5val_5 1 +5 val_5val_5 1 +5 val_5val_5 1 +5 val_5val_5 1 +5 val_5val_5 1 +5 val_5val_5 1 +5 val_5val_5 1 +5 val_5val_5 1 +5 val_5val_5 1 +5 val_5val_5 1 +5 val_5val_5 1 +5 val_5val_5 1 +5 val_5val_5 1 +5 val_5val_5 1 +5 val_5val_5 1 +5 val_5val_5 1 +5 val_5val_5 1 +9 val_9val_9 1 +9 val_9val_9 1 +PREHOOK: query: -- Since a single partition of the big table ('a') is being selected, it should be a map-only +-- job even though multiple partitions of 'b' are being selected +EXPLAIN +INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') +SELECT a.key, concat(a.value, b.value) +FROM test_table1 a JOIN test_table2 b +ON a.key = b.key WHERE a.ds = '1' and b.ds is not null +PREHOOK: type: QUERY +POSTHOOK: query: -- Since a single partition of the big table ('a') is being selected, it should be a map-only +-- job even though multiple partitions of 'b' are being selected +EXPLAIN +INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') +SELECT a.key, concat(a.value, b.value) +FROM test_table1 a JOIN test_table2 b +ON a.key = b.key WHERE a.ds = '1' and b.ds is not null +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + Stage-2 depends on stages: Stage-0 + +STAGE PLANS: + Stage: Stage-1 + Spark +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: a + Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: key is not null (type: boolean) + Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE + Sorted Merge Bucket Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 {key} {value} + 1 {value} + keys: + 0 key (type: int) + 1 key (type: int) + outputColumnNames: _col0, _col1, _col7 + Select Operator + expressions: _col0 (type: int), concat(_col1, _col7) (type: string) + outputColumnNames: _col0, _col1 + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.test_table3 + + Stage: Stage-0 + Move Operator + tables: + partition: + ds 1 + replace: true + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.test_table3 + + Stage: Stage-2 + Stats-Aggr Operator + +PREHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') +SELECT a.key, concat(a.value, b.value) +FROM test_table1 a JOIN test_table2 b +ON a.key = b.key WHERE a.ds = '1' and b.ds is not null +PREHOOK: type: QUERY +PREHOOK: Input: default@test_table1 +PREHOOK: Input: 
default@test_table1@ds=1 +PREHOOK: Input: default@test_table2 +PREHOOK: Input: default@test_table2@ds=1 +PREHOOK: Input: default@test_table2@ds=2 +PREHOOK: Output: default@test_table3@ds=1 +POSTHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') +SELECT a.key, concat(a.value, b.value) +FROM test_table1 a JOIN test_table2 b +ON a.key = b.key WHERE a.ds = '1' and b.ds is not null +POSTHOOK: type: QUERY +POSTHOOK: Input: default@test_table1 +POSTHOOK: Input: default@test_table1@ds=1 +POSTHOOK: Input: default@test_table2 +POSTHOOK: Input: default@test_table2@ds=1 +POSTHOOK: Input: default@test_table2@ds=2 +POSTHOOK: Output: default@test_table3@ds=1 +POSTHOOK: Lineage: test_table3 PARTITION(ds=1).key SIMPLE [(test_table1)a.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: test_table3 PARTITION(ds=1).value EXPRESSION [(test_table1)a.FieldSchema(name:value, type:string, comment:null), (test_table2)b.FieldSchema(name:value, type:string, comment:null), ] +PREHOOK: query: select * from test_table3 tablesample (bucket 1 out of 2) s where ds = '1' +PREHOOK: type: QUERY +PREHOOK: Input: default@test_table3 +PREHOOK: Input: default@test_table3@ds=1 +#### A masked pattern was here #### +POSTHOOK: query: select * from test_table3 tablesample (bucket 1 out of 2) s where ds = '1' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@test_table3 +POSTHOOK: Input: default@test_table3@ds=1 +#### A masked pattern was here #### +0 val_0val_0 1 +0 val_0val_0 1 +0 val_0val_0 1 +0 val_0val_0 1 +0 val_0val_0 1 +0 val_0val_0 1 +0 val_0val_0 1 +0 val_0val_0 1 +0 val_0val_0 1 +0 val_0val_0 1 +0 val_0val_0 1 +0 val_0val_0 1 +0 val_0val_0 1 +0 val_0val_0 1 +0 val_0val_0 1 +0 val_0val_0 1 +0 val_0val_0 1 +0 val_0val_0 1 +2 val_2val_2 1 +2 val_2val_2 1 +4 val_4val_4 1 +4 val_4val_4 1 +8 val_8val_8 1 +8 val_8val_8 1 +PREHOOK: query: select * from test_table3 tablesample (bucket 2 out of 2) s where ds = '1' +PREHOOK: type: QUERY +PREHOOK: Input: default@test_table3 +PREHOOK: Input: default@test_table3@ds=1 +#### A masked pattern was here #### +POSTHOOK: query: select * from test_table3 tablesample (bucket 2 out of 2) s where ds = '1' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@test_table3 +POSTHOOK: Input: default@test_table3@ds=1 +#### A masked pattern was here #### +5 val_5val_5 1 +5 val_5val_5 1 +5 val_5val_5 1 +5 val_5val_5 1 +5 val_5val_5 1 +5 val_5val_5 1 +5 val_5val_5 1 +5 val_5val_5 1 +5 val_5val_5 1 +5 val_5val_5 1 +5 val_5val_5 1 +5 val_5val_5 1 +5 val_5val_5 1 +5 val_5val_5 1 +5 val_5val_5 1 +5 val_5val_5 1 +5 val_5val_5 1 +5 val_5val_5 1 +9 val_9val_9 1 +9 val_9val_9 1 +PREHOOK: query: -- This should be a map-only job +EXPLAIN +INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') +SELECT a.key, concat(a.value, b.value) +FROM +(select key, value from test_table1 where ds = '1') a +JOIN +(select key, value from test_table2 where ds = '1') b +ON a.key = b.key +PREHOOK: type: QUERY +POSTHOOK: query: -- This should be a map-only job +EXPLAIN +INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') +SELECT a.key, concat(a.value, b.value) +FROM +(select key, value from test_table1 where ds = '1') a +JOIN +(select key, value from test_table2 where ds = '1') b +ON a.key = b.key +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + Stage-2 depends on stages: Stage-0 + +STAGE PLANS: + Stage: Stage-1 + Spark +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: test_table1 + Statistics: 
Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: key is not null (type: boolean) + Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: key (type: int), value (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE + Sorted Merge Bucket Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 {_col0} {_col1} + 1 {_col1} + keys: + 0 _col0 (type: int) + 1 _col0 (type: int) + outputColumnNames: _col0, _col1, _col3 + Select Operator + expressions: _col0 (type: int), concat(_col1, _col3) (type: string) + outputColumnNames: _col0, _col1 + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.test_table3 + + Stage: Stage-0 + Move Operator + tables: + partition: + ds 1 + replace: true + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.test_table3 + + Stage: Stage-2 + Stats-Aggr Operator + +PREHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') +SELECT a.key, concat(a.value, b.value) +FROM +(select key, value from test_table1 where ds = '1') a +JOIN +(select key, value from test_table2 where ds = '1') b +ON a.key = b.key +PREHOOK: type: QUERY +PREHOOK: Input: default@test_table1 +PREHOOK: Input: default@test_table1@ds=1 +PREHOOK: Input: default@test_table2 +PREHOOK: Input: default@test_table2@ds=1 +PREHOOK: Output: default@test_table3@ds=1 +POSTHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') +SELECT a.key, concat(a.value, b.value) +FROM +(select key, value from test_table1 where ds = '1') a +JOIN +(select key, value from test_table2 where ds = '1') b +ON a.key = b.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@test_table1 +POSTHOOK: Input: default@test_table1@ds=1 +POSTHOOK: Input: default@test_table2 +POSTHOOK: Input: default@test_table2@ds=1 +POSTHOOK: Output: default@test_table3@ds=1 +POSTHOOK: Lineage: test_table3 PARTITION(ds=1).key SIMPLE [(test_table1)test_table1.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: test_table3 PARTITION(ds=1).value EXPRESSION [(test_table1)test_table1.FieldSchema(name:value, type:string, comment:null), (test_table2)test_table2.FieldSchema(name:value, type:string, comment:null), ] +PREHOOK: query: select * from test_table3 tablesample (bucket 1 out of 2) s where ds = '1' +PREHOOK: type: QUERY +PREHOOK: Input: default@test_table3 +PREHOOK: Input: default@test_table3@ds=1 +#### A masked pattern was here #### +POSTHOOK: query: select * from test_table3 tablesample (bucket 1 out of 2) s where ds = '1' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@test_table3 +POSTHOOK: Input: default@test_table3@ds=1 +#### A masked pattern was here #### +0 val_0val_0 1 +0 val_0val_0 1 +0 val_0val_0 1 +0 val_0val_0 1 +0 val_0val_0 1 +0 val_0val_0 1 +0 val_0val_0 1 +0 val_0val_0 1 +0 val_0val_0 1 +2 val_2val_2 1 +4 val_4val_4 1 +8 val_8val_8 1 +PREHOOK: query: select * from test_table3 tablesample (bucket 2 out of 2) s where ds = '1' +PREHOOK: type: QUERY +PREHOOK: Input: default@test_table3 +PREHOOK: Input: default@test_table3@ds=1 +#### A masked 
pattern was here #### +POSTHOOK: query: select * from test_table3 tablesample (bucket 2 out of 2) s where ds = '1' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@test_table3 +POSTHOOK: Input: default@test_table3@ds=1 +#### A masked pattern was here #### +5 val_5val_5 1 +5 val_5val_5 1 +5 val_5val_5 1 +5 val_5val_5 1 +5 val_5val_5 1 +5 val_5val_5 1 +5 val_5val_5 1 +5 val_5val_5 1 +5 val_5val_5 1 +9 val_9val_9 1 +PREHOOK: query: -- This should be a map-only job +EXPLAIN +INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') +SELECT a.key, concat(a.v1, b.v2) +FROM +(select key, concat(value, value) as v1 from test_table1 where ds = '1') a +JOIN +(select key, concat(value, value) as v2 from test_table2 where ds = '1') b +ON a.key = b.key +PREHOOK: type: QUERY +POSTHOOK: query: -- This should be a map-only job +EXPLAIN +INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') +SELECT a.key, concat(a.v1, b.v2) +FROM +(select key, concat(value, value) as v1 from test_table1 where ds = '1') a +JOIN +(select key, concat(value, value) as v2 from test_table2 where ds = '1') b +ON a.key = b.key +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + Stage-2 depends on stages: Stage-0 + +STAGE PLANS: + Stage: Stage-1 + Spark +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: test_table1 + Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: key is not null (type: boolean) + Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: key (type: int), concat(value, value) (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE + Sorted Merge Bucket Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 {_col0} {_col1} + 1 {_col1} + keys: + 0 _col0 (type: int) + 1 _col0 (type: int) + outputColumnNames: _col0, _col1, _col3 + Select Operator + expressions: _col0 (type: int), concat(_col1, _col3) (type: string) + outputColumnNames: _col0, _col1 + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.test_table3 + + Stage: Stage-0 + Move Operator + tables: + partition: + ds 1 + replace: true + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.test_table3 + + Stage: Stage-2 + Stats-Aggr Operator + +PREHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') +SELECT a.key, concat(a.v1, b.v2) +FROM +(select key, concat(value, value) as v1 from test_table1 where ds = '1') a +JOIN +(select key, concat(value, value) as v2 from test_table2 where ds = '1') b +ON a.key = b.key +PREHOOK: type: QUERY +PREHOOK: Input: default@test_table1 +PREHOOK: Input: default@test_table1@ds=1 +PREHOOK: Input: default@test_table2 +PREHOOK: Input: default@test_table2@ds=1 +PREHOOK: Output: default@test_table3@ds=1 +POSTHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') +SELECT a.key, concat(a.v1, b.v2) +FROM +(select key, concat(value, value) as v1 from test_table1 where ds = '1') a +JOIN +(select key, concat(value, value) as v2 
from test_table2 where ds = '1') b +ON a.key = b.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@test_table1 +POSTHOOK: Input: default@test_table1@ds=1 +POSTHOOK: Input: default@test_table2 +POSTHOOK: Input: default@test_table2@ds=1 +POSTHOOK: Output: default@test_table3@ds=1 +POSTHOOK: Lineage: test_table3 PARTITION(ds=1).key SIMPLE [(test_table1)test_table1.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: test_table3 PARTITION(ds=1).value EXPRESSION [(test_table1)test_table1.FieldSchema(name:value, type:string, comment:null), (test_table2)test_table2.FieldSchema(name:value, type:string, comment:null), ] +PREHOOK: query: select * from test_table3 tablesample (bucket 1 out of 2) s where ds = '1' +PREHOOK: type: QUERY +PREHOOK: Input: default@test_table3 +PREHOOK: Input: default@test_table3@ds=1 +#### A masked pattern was here #### +POSTHOOK: query: select * from test_table3 tablesample (bucket 1 out of 2) s where ds = '1' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@test_table3 +POSTHOOK: Input: default@test_table3@ds=1 +#### A masked pattern was here #### +0 val_0val_0val_0val_0 1 +0 val_0val_0val_0val_0 1 +0 val_0val_0val_0val_0 1 +0 val_0val_0val_0val_0 1 +0 val_0val_0val_0val_0 1 +0 val_0val_0val_0val_0 1 +0 val_0val_0val_0val_0 1 +0 val_0val_0val_0val_0 1 +0 val_0val_0val_0val_0 1 +2 val_2val_2val_2val_2 1 +4 val_4val_4val_4val_4 1 +8 val_8val_8val_8val_8 1 +PREHOOK: query: select * from test_table3 tablesample (bucket 2 out of 2) s where ds = '1' +PREHOOK: type: QUERY +PREHOOK: Input: default@test_table3 +PREHOOK: Input: default@test_table3@ds=1 +#### A masked pattern was here #### +POSTHOOK: query: select * from test_table3 tablesample (bucket 2 out of 2) s where ds = '1' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@test_table3 +POSTHOOK: Input: default@test_table3@ds=1 +#### A masked pattern was here #### +5 val_5val_5val_5val_5 1 +5 val_5val_5val_5val_5 1 +5 val_5val_5val_5val_5 1 +5 val_5val_5val_5val_5 1 +5 val_5val_5val_5val_5 1 +5 val_5val_5val_5val_5 1 +5 val_5val_5val_5val_5 1 +5 val_5val_5val_5val_5 1 +5 val_5val_5val_5val_5 1 +9 val_9val_9val_9val_9 1 +PREHOOK: query: -- This should be a map-reduce job +EXPLAIN +INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') +SELECT a.key+a.key, concat(a.value, b.value) +FROM +(select key, value from test_table1 where ds = '1') a +JOIN +(select key, value from test_table2 where ds = '1') b +ON a.key = b.key +PREHOOK: type: QUERY +POSTHOOK: query: -- This should be a map-reduce job +EXPLAIN +INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') +SELECT a.key+a.key, concat(a.value, b.value) +FROM +(select key, value from test_table1 where ds = '1') a +JOIN +(select key, value from test_table2 where ds = '1') b +ON a.key = b.key +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + Stage-2 depends on stages: Stage-0 + +STAGE PLANS: + Stage: Stage-1 + Spark + Edges: + Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 1) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: test_table1 + Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: key is not null (type: boolean) + Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: key (type: int), value (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE + Sorted Merge Bucket 
Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 {_col0} {_col1} + 1 {_col1} + keys: + 0 _col0 (type: int) + 1 _col0 (type: int) + outputColumnNames: _col0, _col1, _col3 + Select Operator + expressions: (_col0 + _col0) (type: int), concat(_col1, _col3) (type: string) + outputColumnNames: _col0, _col1 + Reduce Output Operator + key expressions: _col0 (type: int) + sort order: + + Map-reduce partition columns: _col0 (type: int) + value expressions: _col0 (type: int), _col1 (type: string) + Reducer 2 + Reduce Operator Tree: + Extract + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.test_table3 + + Stage: Stage-0 + Move Operator + tables: + partition: + ds 1 + replace: true + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.test_table3 + + Stage: Stage-2 + Stats-Aggr Operator + +PREHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') +SELECT a.key+a.key, concat(a.value, b.value) +FROM +(select key, value from test_table1 where ds = '1') a +JOIN +(select key, value from test_table2 where ds = '1') b +ON a.key = b.key +PREHOOK: type: QUERY +PREHOOK: Input: default@test_table1 +PREHOOK: Input: default@test_table1@ds=1 +PREHOOK: Input: default@test_table2 +PREHOOK: Input: default@test_table2@ds=1 +PREHOOK: Output: default@test_table3@ds=1 +POSTHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') +SELECT a.key+a.key, concat(a.value, b.value) +FROM +(select key, value from test_table1 where ds = '1') a +JOIN +(select key, value from test_table2 where ds = '1') b +ON a.key = b.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@test_table1 +POSTHOOK: Input: default@test_table1@ds=1 +POSTHOOK: Input: default@test_table2 +POSTHOOK: Input: default@test_table2@ds=1 +POSTHOOK: Output: default@test_table3@ds=1 +POSTHOOK: Lineage: test_table3 PARTITION(ds=1).key EXPRESSION [(test_table1)test_table1.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: test_table3 PARTITION(ds=1).value EXPRESSION [(test_table1)test_table1.FieldSchema(name:value, type:string, comment:null), (test_table2)test_table2.FieldSchema(name:value, type:string, comment:null), ] +PREHOOK: query: select * from test_table3 tablesample (bucket 1 out of 2) s where ds = '1' +PREHOOK: type: QUERY +PREHOOK: Input: default@test_table3 +PREHOOK: Input: default@test_table3@ds=1 +#### A masked pattern was here #### +POSTHOOK: query: select * from test_table3 tablesample (bucket 1 out of 2) s where ds = '1' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@test_table3 +POSTHOOK: Input: default@test_table3@ds=1 +#### A masked pattern was here #### +0 val_0val_0 1 +0 val_0val_0 1 +0 val_0val_0 1 +0 val_0val_0 1 +0 val_0val_0 1 +0 val_0val_0 1 +0 val_0val_0 1 +0 val_0val_0 1 +0 val_0val_0 1 +4 val_2val_2 1 +8 val_4val_4 1 +10 val_5val_5 1 +10 val_5val_5 1 +10 val_5val_5 1 +10 val_5val_5 1 +10 val_5val_5 1 +10 val_5val_5 1 +10 val_5val_5 1 +10 val_5val_5 1 +10 val_5val_5 1 +16 val_8val_8 1 +18 val_9val_9 1 +PREHOOK: query: select * from test_table3 tablesample (bucket 2 out of 2) s where ds = '1' +PREHOOK: type: QUERY +PREHOOK: Input: default@test_table3 +PREHOOK: Input: default@test_table3@ds=1 
+#### A masked pattern was here #### +POSTHOOK: query: select * from test_table3 tablesample (bucket 2 out of 2) s where ds = '1' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@test_table3 +POSTHOOK: Input: default@test_table3@ds=1 +#### A masked pattern was here #### diff --git ql/src/test/results/clientpositive/spark/bucketsortoptimize_insert_4.q.out ql/src/test/results/clientpositive/spark/bucketsortoptimize_insert_4.q.out new file mode 100644 index 0000000..8ee392e --- /dev/null +++ ql/src/test/results/clientpositive/spark/bucketsortoptimize_insert_4.q.out @@ -0,0 +1,358 @@ +PREHOOK: query: -- Create two bucketed and sorted tables +CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING) +CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@test_table1 +POSTHOOK: query: -- Create two bucketed and sorted tables +CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING) +CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@test_table1 +PREHOOK: query: CREATE TABLE test_table2 (key INT, value STRING) PARTITIONED BY (ds STRING) +CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@test_table2 +POSTHOOK: query: CREATE TABLE test_table2 (key INT, value STRING) PARTITIONED BY (ds STRING) +CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@test_table2 +PREHOOK: query: CREATE TABLE test_table3 (key INT, key2 INT, value STRING) PARTITIONED BY (ds STRING) +CLUSTERED BY (key2) SORTED BY (key2) INTO 2 BUCKETS +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@test_table3 +POSTHOOK: query: CREATE TABLE test_table3 (key INT, key2 INT, value STRING) PARTITIONED BY (ds STRING) +CLUSTERED BY (key2) SORTED BY (key2) INTO 2 BUCKETS +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@test_table3 +PREHOOK: query: FROM src +INSERT OVERWRITE TABLE test_table1 PARTITION (ds = '1') SELECT * where key < 10 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@test_table1@ds=1 +POSTHOOK: query: FROM src +INSERT OVERWRITE TABLE test_table1 PARTITION (ds = '1') SELECT * where key < 10 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@test_table1@ds=1 +POSTHOOK: Lineage: test_table1 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: test_table1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: FROM src +INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') SELECT * where key < 100 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@test_table2@ds=1 +POSTHOOK: query: FROM src +INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') SELECT * where key < 100 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@test_table2@ds=1 +POSTHOOK: Lineage: test_table2 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: test_table2 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: -- Insert data into the bucketed 
table by selecting from another bucketed table +-- This should be a map-only operation, since the insert is happening on the bucketing position +EXPLAIN +INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') +SELECT a.key, a.key, concat(a.value, b.value) +FROM test_table1 a JOIN test_table2 b +ON a.key = b.key WHERE a.ds = '1' and b.ds = '1' +PREHOOK: type: QUERY +POSTHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table +-- This should be a map-only operation, since the insert is happening on the bucketing position +EXPLAIN +INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') +SELECT a.key, a.key, concat(a.value, b.value) +FROM test_table1 a JOIN test_table2 b +ON a.key = b.key WHERE a.ds = '1' and b.ds = '1' +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + Stage-2 depends on stages: Stage-0 + +STAGE PLANS: + Stage: Stage-1 + Spark +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: a + Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: key is not null (type: boolean) + Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE + Sorted Merge Bucket Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 {key} {value} + 1 {value} + keys: + 0 key (type: int) + 1 key (type: int) + outputColumnNames: _col0, _col1, _col7 + Select Operator + expressions: _col0 (type: int), _col0 (type: int), concat(_col1, _col7) (type: string) + outputColumnNames: _col0, _col1, _col2 + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.test_table3 + + Stage: Stage-0 + Move Operator + tables: + partition: + ds 1 + replace: true + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.test_table3 + + Stage: Stage-2 + Stats-Aggr Operator + +PREHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') +SELECT a.key, a.key, concat(a.value, b.value) +FROM test_table1 a JOIN test_table2 b +ON a.key = b.key WHERE a.ds = '1' and b.ds = '1' +PREHOOK: type: QUERY +PREHOOK: Input: default@test_table1 +PREHOOK: Input: default@test_table1@ds=1 +PREHOOK: Input: default@test_table2 +PREHOOK: Input: default@test_table2@ds=1 +PREHOOK: Output: default@test_table3@ds=1 +POSTHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') +SELECT a.key, a.key, concat(a.value, b.value) +FROM test_table1 a JOIN test_table2 b +ON a.key = b.key WHERE a.ds = '1' and b.ds = '1' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@test_table1 +POSTHOOK: Input: default@test_table1@ds=1 +POSTHOOK: Input: default@test_table2 +POSTHOOK: Input: default@test_table2@ds=1 +POSTHOOK: Output: default@test_table3@ds=1 +POSTHOOK: Lineage: test_table3 PARTITION(ds=1).key SIMPLE [(test_table1)a.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: test_table3 PARTITION(ds=1).key2 SIMPLE [(test_table1)a.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: test_table3 PARTITION(ds=1).value EXPRESSION [(test_table1)a.FieldSchema(name:value, type:string, comment:null), 
(test_table2)b.FieldSchema(name:value, type:string, comment:null), ] +PREHOOK: query: select * from test_table3 tablesample (bucket 1 out of 2) s where ds = '1' +PREHOOK: type: QUERY +PREHOOK: Input: default@test_table3 +PREHOOK: Input: default@test_table3@ds=1 +#### A masked pattern was here #### +POSTHOOK: query: select * from test_table3 tablesample (bucket 1 out of 2) s where ds = '1' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@test_table3 +POSTHOOK: Input: default@test_table3@ds=1 +#### A masked pattern was here #### +0 0 val_0val_0 1 +0 0 val_0val_0 1 +0 0 val_0val_0 1 +0 0 val_0val_0 1 +0 0 val_0val_0 1 +0 0 val_0val_0 1 +0 0 val_0val_0 1 +0 0 val_0val_0 1 +0 0 val_0val_0 1 +2 2 val_2val_2 1 +4 4 val_4val_4 1 +8 8 val_8val_8 1 +PREHOOK: query: select * from test_table3 tablesample (bucket 2 out of 2) s where ds = '1' +PREHOOK: type: QUERY +PREHOOK: Input: default@test_table3 +PREHOOK: Input: default@test_table3@ds=1 +#### A masked pattern was here #### +POSTHOOK: query: select * from test_table3 tablesample (bucket 2 out of 2) s where ds = '1' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@test_table3 +POSTHOOK: Input: default@test_table3@ds=1 +#### A masked pattern was here #### +5 5 val_5val_5 1 +5 5 val_5val_5 1 +5 5 val_5val_5 1 +5 5 val_5val_5 1 +5 5 val_5val_5 1 +5 5 val_5val_5 1 +5 5 val_5val_5 1 +5 5 val_5val_5 1 +5 5 val_5val_5 1 +9 9 val_9val_9 1 +PREHOOK: query: DROP TABLE test_table3 +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@test_table3 +PREHOOK: Output: default@test_table3 +POSTHOOK: query: DROP TABLE test_table3 +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@test_table3 +POSTHOOK: Output: default@test_table3 +PREHOOK: query: CREATE TABLE test_table3 (key INT, value STRING) PARTITIONED BY (ds STRING) +CLUSTERED BY (value) SORTED BY (value) INTO 2 BUCKETS +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@test_table3 +POSTHOOK: query: CREATE TABLE test_table3 (key INT, value STRING) PARTITIONED BY (ds STRING) +CLUSTERED BY (value) SORTED BY (value) INTO 2 BUCKETS +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@test_table3 +PREHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table +-- This should be a map-reduce operation, since the insert is happening on a non-bucketing position +EXPLAIN +INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') +SELECT a.key, a.value +FROM test_table1 a JOIN test_table2 b +ON a.key = b.key WHERE a.ds = '1' and b.ds = '1' +PREHOOK: type: QUERY +POSTHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table +-- This should be a map-reduce operation, since the insert is happening on a non-bucketing position +EXPLAIN +INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') +SELECT a.key, a.value +FROM test_table1 a JOIN test_table2 b +ON a.key = b.key WHERE a.ds = '1' and b.ds = '1' +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + Stage-2 depends on stages: Stage-0 + +STAGE PLANS: + Stage: Stage-1 + Spark + Edges: + Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 1) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: a + Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: key is not null (type: boolean) + Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE + Sorted Merge Bucket Map Join 
Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 {key} {value} + 1 + keys: + 0 key (type: int) + 1 key (type: int) + outputColumnNames: _col0, _col1 + Reduce Output Operator + key expressions: _col1 (type: string) + sort order: + + Map-reduce partition columns: _col1 (type: string) + value expressions: _col0 (type: int), _col1 (type: string) + Reducer 2 + Reduce Operator Tree: + Extract + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.test_table3 + + Stage: Stage-0 + Move Operator + tables: + partition: + ds 1 + replace: true + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.test_table3 + + Stage: Stage-2 + Stats-Aggr Operator + +PREHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') +SELECT a.key, a.value +FROM test_table1 a JOIN test_table2 b +ON a.key = b.key WHERE a.ds = '1' and b.ds = '1' +PREHOOK: type: QUERY +PREHOOK: Input: default@test_table1 +PREHOOK: Input: default@test_table1@ds=1 +PREHOOK: Input: default@test_table2 +PREHOOK: Input: default@test_table2@ds=1 +PREHOOK: Output: default@test_table3@ds=1 +POSTHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') +SELECT a.key, a.value +FROM test_table1 a JOIN test_table2 b +ON a.key = b.key WHERE a.ds = '1' and b.ds = '1' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@test_table1 +POSTHOOK: Input: default@test_table1@ds=1 +POSTHOOK: Input: default@test_table2 +POSTHOOK: Input: default@test_table2@ds=1 +POSTHOOK: Output: default@test_table3@ds=1 +POSTHOOK: Lineage: test_table3 PARTITION(ds=1).key SIMPLE [(test_table1)a.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: test_table3 PARTITION(ds=1).value SIMPLE [(test_table1)a.FieldSchema(name:value, type:string, comment:null), ] +PREHOOK: query: select * from test_table3 tablesample (bucket 1 out of 2) s where ds = '1' +PREHOOK: type: QUERY +PREHOOK: Input: default@test_table3 +PREHOOK: Input: default@test_table3@ds=1 +#### A masked pattern was here #### +POSTHOOK: query: select * from test_table3 tablesample (bucket 1 out of 2) s where ds = '1' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@test_table3 +POSTHOOK: Input: default@test_table3@ds=1 +#### A masked pattern was here #### +0 val_0 1 +0 val_0 1 +0 val_0 1 +0 val_0 1 +0 val_0 1 +0 val_0 1 +0 val_0 1 +0 val_0 1 +0 val_0 1 +2 val_2 1 +4 val_4 1 +8 val_8 1 +PREHOOK: query: select * from test_table3 tablesample (bucket 2 out of 2) s where ds = '1' +PREHOOK: type: QUERY +PREHOOK: Input: default@test_table3 +PREHOOK: Input: default@test_table3@ds=1 +#### A masked pattern was here #### +POSTHOOK: query: select * from test_table3 tablesample (bucket 2 out of 2) s where ds = '1' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@test_table3 +POSTHOOK: Input: default@test_table3@ds=1 +#### A masked pattern was here #### +5 val_5 1 +5 val_5 1 +5 val_5 1 +5 val_5 1 +5 val_5 1 +5 val_5 1 +5 val_5 1 +5 val_5 1 +5 val_5 1 +9 val_9 1 +PREHOOK: query: DROP TABLE test_table3 +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@test_table3 +PREHOOK: Output: default@test_table3 +POSTHOOK: query: DROP TABLE test_table3 +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@test_table3 +POSTHOOK: 
Output: default@test_table3 diff --git ql/src/test/results/clientpositive/spark/bucketsortoptimize_insert_6.q.out ql/src/test/results/clientpositive/spark/bucketsortoptimize_insert_6.q.out new file mode 100644 index 0000000..9c119df --- /dev/null +++ ql/src/test/results/clientpositive/spark/bucketsortoptimize_insert_6.q.out @@ -0,0 +1,926 @@ +PREHOOK: query: -- Create two bucketed and sorted tables +CREATE TABLE test_table1 (key INT, key2 INT, value STRING) PARTITIONED BY (ds STRING) +CLUSTERED BY (key, key2) SORTED BY (key ASC, key2 DESC) INTO 2 BUCKETS +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@test_table1 +POSTHOOK: query: -- Create two bucketed and sorted tables +CREATE TABLE test_table1 (key INT, key2 INT, value STRING) PARTITIONED BY (ds STRING) +CLUSTERED BY (key, key2) SORTED BY (key ASC, key2 DESC) INTO 2 BUCKETS +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@test_table1 +PREHOOK: query: CREATE TABLE test_table2 (key INT, key2 INT, value STRING) PARTITIONED BY (ds STRING) +CLUSTERED BY (key, key2) SORTED BY (key ASC, key2 DESC) INTO 2 BUCKETS +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@test_table2 +POSTHOOK: query: CREATE TABLE test_table2 (key INT, key2 INT, value STRING) PARTITIONED BY (ds STRING) +CLUSTERED BY (key, key2) SORTED BY (key ASC, key2 DESC) INTO 2 BUCKETS +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@test_table2 +PREHOOK: query: CREATE TABLE test_table3 (key INT, key2 INT, value STRING) PARTITIONED BY (ds STRING) +CLUSTERED BY (key, key2) SORTED BY (key ASC, key2 DESC) INTO 2 BUCKETS +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@test_table3 +POSTHOOK: query: CREATE TABLE test_table3 (key INT, key2 INT, value STRING) PARTITIONED BY (ds STRING) +CLUSTERED BY (key, key2) SORTED BY (key ASC, key2 DESC) INTO 2 BUCKETS +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@test_table3 +PREHOOK: query: FROM src +INSERT OVERWRITE TABLE test_table1 PARTITION (ds = '1') SELECT key, key+1, value where key < 10 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@test_table1@ds=1 +POSTHOOK: query: FROM src +INSERT OVERWRITE TABLE test_table1 PARTITION (ds = '1') SELECT key, key+1, value where key < 10 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@test_table1@ds=1 +POSTHOOK: Lineage: test_table1 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: test_table1 PARTITION(ds=1).key2 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: test_table1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: FROM src +INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') SELECT key, key+1, value where key < 100 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@test_table2@ds=1 +POSTHOOK: query: FROM src +INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') SELECT key, key+1, value where key < 100 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@test_table2@ds=1 +POSTHOOK: Lineage: test_table2 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: test_table2 PARTITION(ds=1).key2 EXPRESSION 
[(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: test_table2 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table +-- This should be a map-only operation, since the sort-order matches +EXPLAIN +INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') +SELECT a.key, a.key2, concat(a.value, b.value) +FROM test_table1 a JOIN test_table2 b +ON a.key = b.key and a.key2 = b.key2 WHERE a.ds = '1' and b.ds = '1' +PREHOOK: type: QUERY +POSTHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table +-- This should be a map-only operation, since the sort-order matches +EXPLAIN +INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') +SELECT a.key, a.key2, concat(a.value, b.value) +FROM test_table1 a JOIN test_table2 b +ON a.key = b.key and a.key2 = b.key2 WHERE a.ds = '1' and b.ds = '1' +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + Stage-2 depends on stages: Stage-0 + +STAGE PLANS: + Stage: Stage-1 + Spark +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: a + Statistics: Num rows: 10 Data size: 91 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (key is not null and key2 is not null) (type: boolean) + Statistics: Num rows: 3 Data size: 27 Basic stats: COMPLETE Column stats: NONE + Sorted Merge Bucket Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 {key} {key2} {value} + 1 {value} + keys: + 0 key (type: int), key2 (type: int) + 1 key (type: int), key2 (type: int) + outputColumnNames: _col0, _col1, _col2, _col9 + Select Operator + expressions: _col0 (type: int), _col1 (type: int), concat(_col2, _col9) (type: string) + outputColumnNames: _col0, _col1, _col2 + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.test_table3 + + Stage: Stage-0 + Move Operator + tables: + partition: + ds 1 + replace: true + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.test_table3 + + Stage: Stage-2 + Stats-Aggr Operator + +PREHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') +SELECT a.key, a.key2, concat(a.value, b.value) +FROM test_table1 a JOIN test_table2 b +ON a.key = b.key and a.key2 = b.key2 WHERE a.ds = '1' and b.ds = '1' +PREHOOK: type: QUERY +PREHOOK: Input: default@test_table1 +PREHOOK: Input: default@test_table1@ds=1 +PREHOOK: Input: default@test_table2 +PREHOOK: Input: default@test_table2@ds=1 +PREHOOK: Output: default@test_table3@ds=1 +POSTHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') +SELECT a.key, a.key2, concat(a.value, b.value) +FROM test_table1 a JOIN test_table2 b +ON a.key = b.key and a.key2 = b.key2 WHERE a.ds = '1' and b.ds = '1' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@test_table1 +POSTHOOK: Input: default@test_table1@ds=1 +POSTHOOK: Input: default@test_table2 +POSTHOOK: Input: default@test_table2@ds=1 +POSTHOOK: Output: default@test_table3@ds=1 +POSTHOOK: Lineage: test_table3 PARTITION(ds=1).key SIMPLE 
[(test_table1)a.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: test_table3 PARTITION(ds=1).key2 SIMPLE [(test_table1)a.FieldSchema(name:key2, type:int, comment:null), ] +POSTHOOK: Lineage: test_table3 PARTITION(ds=1).value EXPRESSION [(test_table1)a.FieldSchema(name:value, type:string, comment:null), (test_table2)b.FieldSchema(name:value, type:string, comment:null), ] +PREHOOK: query: select * from test_table3 tablesample (bucket 1 out of 2) s where ds = '1' +PREHOOK: type: QUERY +PREHOOK: Input: default@test_table3 +PREHOOK: Input: default@test_table3@ds=1 +#### A masked pattern was here #### +POSTHOOK: query: select * from test_table3 tablesample (bucket 1 out of 2) s where ds = '1' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@test_table3 +POSTHOOK: Input: default@test_table3@ds=1 +#### A masked pattern was here #### +PREHOOK: query: select * from test_table3 tablesample (bucket 2 out of 2) s where ds = '1' +PREHOOK: type: QUERY +PREHOOK: Input: default@test_table3 +PREHOOK: Input: default@test_table3@ds=1 +#### A masked pattern was here #### +POSTHOOK: query: select * from test_table3 tablesample (bucket 2 out of 2) s where ds = '1' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@test_table3 +POSTHOOK: Input: default@test_table3@ds=1 +#### A masked pattern was here #### +0 1 val_0val_0 1 +0 1 val_0val_0 1 +0 1 val_0val_0 1 +0 1 val_0val_0 1 +0 1 val_0val_0 1 +0 1 val_0val_0 1 +0 1 val_0val_0 1 +0 1 val_0val_0 1 +0 1 val_0val_0 1 +2 3 val_2val_2 1 +4 5 val_4val_4 1 +5 6 val_5val_5 1 +5 6 val_5val_5 1 +5 6 val_5val_5 1 +5 6 val_5val_5 1 +5 6 val_5val_5 1 +5 6 val_5val_5 1 +5 6 val_5val_5 1 +5 6 val_5val_5 1 +5 6 val_5val_5 1 +8 9 val_8val_8 1 +9 10 val_9val_9 1 +PREHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table +-- This should be a map-only operation, since the sort-order matches +EXPLAIN +INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') +SELECT subq1.key, subq1.key2, subq1.value from +( +SELECT a.key, a.key2, concat(a.value, b.value) as value +FROM test_table1 a JOIN test_table2 b +ON a.key = b.key and a.key2 = b.key2 WHERE a.ds = '1' and b.ds = '1' +)subq1 +PREHOOK: type: QUERY +POSTHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table +-- This should be a map-only operation, since the sort-order matches +EXPLAIN +INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') +SELECT subq1.key, subq1.key2, subq1.value from +( +SELECT a.key, a.key2, concat(a.value, b.value) as value +FROM test_table1 a JOIN test_table2 b +ON a.key = b.key and a.key2 = b.key2 WHERE a.ds = '1' and b.ds = '1' +)subq1 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + Stage-2 depends on stages: Stage-0 + +STAGE PLANS: + Stage: Stage-1 + Spark +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: a + Statistics: Num rows: 10 Data size: 91 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (key is not null and key2 is not null) (type: boolean) + Statistics: Num rows: 3 Data size: 27 Basic stats: COMPLETE Column stats: NONE + Sorted Merge Bucket Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 {key} {key2} {value} + 1 {value} + keys: + 0 key (type: int), key2 (type: int) + 1 key (type: int), key2 (type: int) + outputColumnNames: _col0, _col1, _col2, _col9 + Select Operator + expressions: _col0 (type: int), _col1 (type: int), concat(_col2, _col9) 
(type: string) + outputColumnNames: _col0, _col1, _col2 + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.test_table3 + + Stage: Stage-0 + Move Operator + tables: + partition: + ds 1 + replace: true + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.test_table3 + + Stage: Stage-2 + Stats-Aggr Operator + +PREHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') +SELECT subq1.key, subq1.key2, subq1.value from +( +SELECT a.key, a.key2, concat(a.value, b.value) as value +FROM test_table1 a JOIN test_table2 b +ON a.key = b.key and a.key2 = b.key2 WHERE a.ds = '1' and b.ds = '1' +)subq1 +PREHOOK: type: QUERY +PREHOOK: Input: default@test_table1 +PREHOOK: Input: default@test_table1@ds=1 +PREHOOK: Input: default@test_table2 +PREHOOK: Input: default@test_table2@ds=1 +PREHOOK: Output: default@test_table3@ds=1 +POSTHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') +SELECT subq1.key, subq1.key2, subq1.value from +( +SELECT a.key, a.key2, concat(a.value, b.value) as value +FROM test_table1 a JOIN test_table2 b +ON a.key = b.key and a.key2 = b.key2 WHERE a.ds = '1' and b.ds = '1' +)subq1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@test_table1 +POSTHOOK: Input: default@test_table1@ds=1 +POSTHOOK: Input: default@test_table2 +POSTHOOK: Input: default@test_table2@ds=1 +POSTHOOK: Output: default@test_table3@ds=1 +POSTHOOK: Lineage: test_table3 PARTITION(ds=1).key SIMPLE [(test_table1)a.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: test_table3 PARTITION(ds=1).key2 SIMPLE [(test_table1)a.FieldSchema(name:key2, type:int, comment:null), ] +POSTHOOK: Lineage: test_table3 PARTITION(ds=1).value EXPRESSION [(test_table1)a.FieldSchema(name:value, type:string, comment:null), (test_table2)b.FieldSchema(name:value, type:string, comment:null), ] +PREHOOK: query: select * from test_table3 tablesample (bucket 1 out of 2) s where ds = '1' +PREHOOK: type: QUERY +PREHOOK: Input: default@test_table3 +PREHOOK: Input: default@test_table3@ds=1 +#### A masked pattern was here #### +POSTHOOK: query: select * from test_table3 tablesample (bucket 1 out of 2) s where ds = '1' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@test_table3 +POSTHOOK: Input: default@test_table3@ds=1 +#### A masked pattern was here #### +PREHOOK: query: select * from test_table3 tablesample (bucket 2 out of 2) s where ds = '1' +PREHOOK: type: QUERY +PREHOOK: Input: default@test_table3 +PREHOOK: Input: default@test_table3@ds=1 +#### A masked pattern was here #### +POSTHOOK: query: select * from test_table3 tablesample (bucket 2 out of 2) s where ds = '1' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@test_table3 +POSTHOOK: Input: default@test_table3@ds=1 +#### A masked pattern was here #### +0 1 val_0val_0 1 +0 1 val_0val_0 1 +0 1 val_0val_0 1 +0 1 val_0val_0 1 +0 1 val_0val_0 1 +0 1 val_0val_0 1 +0 1 val_0val_0 1 +0 1 val_0val_0 1 +0 1 val_0val_0 1 +2 3 val_2val_2 1 +4 5 val_4val_4 1 +5 6 val_5val_5 1 +5 6 val_5val_5 1 +5 6 val_5val_5 1 +5 6 val_5val_5 1 +5 6 val_5val_5 1 +5 6 val_5val_5 1 +5 6 val_5val_5 1 +5 6 val_5val_5 1 +5 6 val_5val_5 1 +8 9 val_8val_8 1 +9 10 val_9val_9 1 +PREHOOK: query: -- Insert data into the 
bucketed table by selecting from another bucketed table +-- This should be a map-reduce operation +EXPLAIN +INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') +SELECT a.key2, a.key, concat(a.value, b.value) +FROM test_table1 a JOIN test_table2 b +ON a.key = b.key and a.key2 = b.key2 WHERE a.ds = '1' and b.ds = '1' +PREHOOK: type: QUERY +POSTHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table +-- This should be a map-reduce operation +EXPLAIN +INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') +SELECT a.key2, a.key, concat(a.value, b.value) +FROM test_table1 a JOIN test_table2 b +ON a.key = b.key and a.key2 = b.key2 WHERE a.ds = '1' and b.ds = '1' +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + Stage-2 depends on stages: Stage-0 + +STAGE PLANS: + Stage: Stage-1 + Spark + Edges: + Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 1) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: a + Statistics: Num rows: 10 Data size: 91 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (key is not null and key2 is not null) (type: boolean) + Statistics: Num rows: 3 Data size: 27 Basic stats: COMPLETE Column stats: NONE + Sorted Merge Bucket Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 {key} {key2} {value} + 1 {value} + keys: + 0 key (type: int), key2 (type: int) + 1 key (type: int), key2 (type: int) + outputColumnNames: _col0, _col1, _col2, _col9 + Select Operator + expressions: _col1 (type: int), _col0 (type: int), concat(_col2, _col9) (type: string) + outputColumnNames: _col0, _col1, _col2 + Reduce Output Operator + key expressions: _col0 (type: int), _col1 (type: int) + sort order: +- + Map-reduce partition columns: _col0 (type: int), _col1 (type: int) + value expressions: _col0 (type: int), _col1 (type: int), _col2 (type: string) + Reducer 2 + Reduce Operator Tree: + Extract + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.test_table3 + + Stage: Stage-0 + Move Operator + tables: + partition: + ds 1 + replace: true + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.test_table3 + + Stage: Stage-2 + Stats-Aggr Operator + +PREHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table +-- This should be a map-reduce operation +EXPLAIN +INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') +SELECT subq1.key2, subq1.key, subq1.value from +( +SELECT a.key, a.key2, concat(a.value, b.value) as value +FROM test_table1 a JOIN test_table2 b +ON a.key = b.key and a.key2 = b.key2 WHERE a.ds = '1' and b.ds = '1' +)subq1 +PREHOOK: type: QUERY +POSTHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table +-- This should be a map-reduce operation +EXPLAIN +INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') +SELECT subq1.key2, subq1.key, subq1.value from +( +SELECT a.key, a.key2, concat(a.value, b.value) as value +FROM test_table1 a JOIN test_table2 b +ON a.key = b.key and a.key2 = b.key2 WHERE a.ds = '1' and b.ds = '1' +)subq1 +POSTHOOK: type: QUERY +STAGE 
DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + Stage-2 depends on stages: Stage-0 + +STAGE PLANS: + Stage: Stage-1 + Spark + Edges: + Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 1) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: a + Statistics: Num rows: 10 Data size: 91 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (key is not null and key2 is not null) (type: boolean) + Statistics: Num rows: 3 Data size: 27 Basic stats: COMPLETE Column stats: NONE + Sorted Merge Bucket Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 {key} {key2} {value} + 1 {value} + keys: + 0 key (type: int), key2 (type: int) + 1 key (type: int), key2 (type: int) + outputColumnNames: _col0, _col1, _col2, _col9 + Select Operator + expressions: _col1 (type: int), _col0 (type: int), concat(_col2, _col9) (type: string) + outputColumnNames: _col0, _col1, _col2 + Reduce Output Operator + key expressions: _col0 (type: int), _col1 (type: int) + sort order: +- + Map-reduce partition columns: _col0 (type: int), _col1 (type: int) + value expressions: _col0 (type: int), _col1 (type: int), _col2 (type: string) + Reducer 2 + Reduce Operator Tree: + Extract + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.test_table3 + + Stage: Stage-0 + Move Operator + tables: + partition: + ds 1 + replace: true + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.test_table3 + + Stage: Stage-2 + Stats-Aggr Operator + +PREHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table +-- This should be a map-only operation +EXPLAIN +INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') +SELECT subq2.key, subq2.key2, subq2.value from +( +SELECT subq1.key2, subq1.key, subq1.value from +( +SELECT a.key, a.key2, concat(a.value, b.value) as value +FROM test_table1 a JOIN test_table2 b +ON a.key = b.key and a.key2 = b.key2 WHERE a.ds = '1' and b.ds = '1' +)subq1 +)subq2 +PREHOOK: type: QUERY +POSTHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table +-- This should be a map-only operation +EXPLAIN +INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') +SELECT subq2.key, subq2.key2, subq2.value from +( +SELECT subq1.key2, subq1.key, subq1.value from +( +SELECT a.key, a.key2, concat(a.value, b.value) as value +FROM test_table1 a JOIN test_table2 b +ON a.key = b.key and a.key2 = b.key2 WHERE a.ds = '1' and b.ds = '1' +)subq1 +)subq2 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + Stage-2 depends on stages: Stage-0 + +STAGE PLANS: + Stage: Stage-1 + Spark +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: a + Statistics: Num rows: 10 Data size: 91 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (key is not null and key2 is not null) (type: boolean) + Statistics: Num rows: 3 Data size: 27 Basic stats: COMPLETE Column stats: NONE + Sorted Merge Bucket Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 {key} {key2} 
{value} + 1 {value} + keys: + 0 key (type: int), key2 (type: int) + 1 key (type: int), key2 (type: int) + outputColumnNames: _col0, _col1, _col2, _col9 + Select Operator + expressions: _col0 (type: int), _col1 (type: int), concat(_col2, _col9) (type: string) + outputColumnNames: _col0, _col1, _col2 + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.test_table3 + + Stage: Stage-0 + Move Operator + tables: + partition: + ds 1 + replace: true + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.test_table3 + + Stage: Stage-2 + Stats-Aggr Operator + +PREHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') +SELECT subq2.key, subq2.key2, subq2.value from +( +SELECT subq1.key2, subq1.key, subq1.value from +( +SELECT a.key, a.key2, concat(a.value, b.value) as value +FROM test_table1 a JOIN test_table2 b +ON a.key = b.key and a.key2 = b.key2 WHERE a.ds = '1' and b.ds = '1' +)subq1 +)subq2 +PREHOOK: type: QUERY +PREHOOK: Input: default@test_table1 +PREHOOK: Input: default@test_table1@ds=1 +PREHOOK: Input: default@test_table2 +PREHOOK: Input: default@test_table2@ds=1 +PREHOOK: Output: default@test_table3@ds=1 +POSTHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') +SELECT subq2.key, subq2.key2, subq2.value from +( +SELECT subq1.key2, subq1.key, subq1.value from +( +SELECT a.key, a.key2, concat(a.value, b.value) as value +FROM test_table1 a JOIN test_table2 b +ON a.key = b.key and a.key2 = b.key2 WHERE a.ds = '1' and b.ds = '1' +)subq1 +)subq2 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@test_table1 +POSTHOOK: Input: default@test_table1@ds=1 +POSTHOOK: Input: default@test_table2 +POSTHOOK: Input: default@test_table2@ds=1 +POSTHOOK: Output: default@test_table3@ds=1 +POSTHOOK: Lineage: test_table3 PARTITION(ds=1).key SIMPLE [(test_table1)a.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: test_table3 PARTITION(ds=1).key2 SIMPLE [(test_table1)a.FieldSchema(name:key2, type:int, comment:null), ] +POSTHOOK: Lineage: test_table3 PARTITION(ds=1).value EXPRESSION [(test_table1)a.FieldSchema(name:value, type:string, comment:null), (test_table2)b.FieldSchema(name:value, type:string, comment:null), ] +PREHOOK: query: select * from test_table3 tablesample (bucket 1 out of 2) s where ds = '1' +PREHOOK: type: QUERY +PREHOOK: Input: default@test_table3 +PREHOOK: Input: default@test_table3@ds=1 +#### A masked pattern was here #### +POSTHOOK: query: select * from test_table3 tablesample (bucket 1 out of 2) s where ds = '1' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@test_table3 +POSTHOOK: Input: default@test_table3@ds=1 +#### A masked pattern was here #### +PREHOOK: query: select * from test_table3 tablesample (bucket 2 out of 2) s where ds = '1' +PREHOOK: type: QUERY +PREHOOK: Input: default@test_table3 +PREHOOK: Input: default@test_table3@ds=1 +#### A masked pattern was here #### +POSTHOOK: query: select * from test_table3 tablesample (bucket 2 out of 2) s where ds = '1' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@test_table3 +POSTHOOK: Input: default@test_table3@ds=1 +#### A masked pattern was here #### +0 1 val_0val_0 1 +0 1 val_0val_0 1 +0 1 val_0val_0 1 +0 1 val_0val_0 1 +0 1 
val_0val_0 1 +0 1 val_0val_0 1 +0 1 val_0val_0 1 +0 1 val_0val_0 1 +0 1 val_0val_0 1 +2 3 val_2val_2 1 +4 5 val_4val_4 1 +5 6 val_5val_5 1 +5 6 val_5val_5 1 +5 6 val_5val_5 1 +5 6 val_5val_5 1 +5 6 val_5val_5 1 +5 6 val_5val_5 1 +5 6 val_5val_5 1 +5 6 val_5val_5 1 +5 6 val_5val_5 1 +8 9 val_8val_8 1 +9 10 val_9val_9 1 +PREHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table +-- This should be a map-only operation +EXPLAIN +INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') +SELECT subq2.k2, subq2.k1, subq2.value from +( +SELECT subq1.key2 as k1, subq1.key as k2, subq1.value from +( +SELECT a.key, a.key2, concat(a.value, b.value) as value +FROM test_table1 a JOIN test_table2 b +ON a.key = b.key and a.key2 = b.key2 WHERE a.ds = '1' and b.ds = '1' +)subq1 +)subq2 +PREHOOK: type: QUERY +POSTHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table +-- This should be a map-only operation +EXPLAIN +INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') +SELECT subq2.k2, subq2.k1, subq2.value from +( +SELECT subq1.key2 as k1, subq1.key as k2, subq1.value from +( +SELECT a.key, a.key2, concat(a.value, b.value) as value +FROM test_table1 a JOIN test_table2 b +ON a.key = b.key and a.key2 = b.key2 WHERE a.ds = '1' and b.ds = '1' +)subq1 +)subq2 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + Stage-2 depends on stages: Stage-0 + +STAGE PLANS: + Stage: Stage-1 + Spark +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: a + Statistics: Num rows: 10 Data size: 91 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (key is not null and key2 is not null) (type: boolean) + Statistics: Num rows: 3 Data size: 27 Basic stats: COMPLETE Column stats: NONE + Sorted Merge Bucket Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 {key} {key2} {value} + 1 {value} + keys: + 0 key (type: int), key2 (type: int) + 1 key (type: int), key2 (type: int) + outputColumnNames: _col0, _col1, _col2, _col9 + Select Operator + expressions: _col0 (type: int), _col1 (type: int), concat(_col2, _col9) (type: string) + outputColumnNames: _col0, _col1, _col2 + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.test_table3 + + Stage: Stage-0 + Move Operator + tables: + partition: + ds 1 + replace: true + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.test_table3 + + Stage: Stage-2 + Stats-Aggr Operator + +PREHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') +SELECT subq2.k2, subq2.k1, subq2.value from +( +SELECT subq1.key2 as k1, subq1.key as k2, subq1.value from +( +SELECT a.key, a.key2, concat(a.value, b.value) as value +FROM test_table1 a JOIN test_table2 b +ON a.key = b.key and a.key2 = b.key2 WHERE a.ds = '1' and b.ds = '1' +)subq1 +)subq2 +PREHOOK: type: QUERY +PREHOOK: Input: default@test_table1 +PREHOOK: Input: default@test_table1@ds=1 +PREHOOK: Input: default@test_table2 +PREHOOK: Input: default@test_table2@ds=1 +PREHOOK: Output: default@test_table3@ds=1 +POSTHOOK: query: INSERT OVERWRITE TABLE 
test_table3 PARTITION (ds = '1') +SELECT subq2.k2, subq2.k1, subq2.value from +( +SELECT subq1.key2 as k1, subq1.key as k2, subq1.value from +( +SELECT a.key, a.key2, concat(a.value, b.value) as value +FROM test_table1 a JOIN test_table2 b +ON a.key = b.key and a.key2 = b.key2 WHERE a.ds = '1' and b.ds = '1' +)subq1 +)subq2 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@test_table1 +POSTHOOK: Input: default@test_table1@ds=1 +POSTHOOK: Input: default@test_table2 +POSTHOOK: Input: default@test_table2@ds=1 +POSTHOOK: Output: default@test_table3@ds=1 +POSTHOOK: Lineage: test_table3 PARTITION(ds=1).key SIMPLE [(test_table1)a.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: test_table3 PARTITION(ds=1).key2 SIMPLE [(test_table1)a.FieldSchema(name:key2, type:int, comment:null), ] +POSTHOOK: Lineage: test_table3 PARTITION(ds=1).value EXPRESSION [(test_table1)a.FieldSchema(name:value, type:string, comment:null), (test_table2)b.FieldSchema(name:value, type:string, comment:null), ] +PREHOOK: query: select * from test_table3 tablesample (bucket 1 out of 2) s where ds = '1' +PREHOOK: type: QUERY +PREHOOK: Input: default@test_table3 +PREHOOK: Input: default@test_table3@ds=1 +#### A masked pattern was here #### +POSTHOOK: query: select * from test_table3 tablesample (bucket 1 out of 2) s where ds = '1' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@test_table3 +POSTHOOK: Input: default@test_table3@ds=1 +#### A masked pattern was here #### +PREHOOK: query: select * from test_table3 tablesample (bucket 2 out of 2) s where ds = '1' +PREHOOK: type: QUERY +PREHOOK: Input: default@test_table3 +PREHOOK: Input: default@test_table3@ds=1 +#### A masked pattern was here #### +POSTHOOK: query: select * from test_table3 tablesample (bucket 2 out of 2) s where ds = '1' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@test_table3 +POSTHOOK: Input: default@test_table3@ds=1 +#### A masked pattern was here #### +0 1 val_0val_0 1 +0 1 val_0val_0 1 +0 1 val_0val_0 1 +0 1 val_0val_0 1 +0 1 val_0val_0 1 +0 1 val_0val_0 1 +0 1 val_0val_0 1 +0 1 val_0val_0 1 +0 1 val_0val_0 1 +2 3 val_2val_2 1 +4 5 val_4val_4 1 +5 6 val_5val_5 1 +5 6 val_5val_5 1 +5 6 val_5val_5 1 +5 6 val_5val_5 1 +5 6 val_5val_5 1 +5 6 val_5val_5 1 +5 6 val_5val_5 1 +5 6 val_5val_5 1 +5 6 val_5val_5 1 +8 9 val_8val_8 1 +9 10 val_9val_9 1 +PREHOOK: query: CREATE TABLE test_table4 (key INT, key2 INT, value STRING) PARTITIONED BY (ds STRING) +CLUSTERED BY (key, key2) SORTED BY (key DESC, key2 DESC) INTO 2 BUCKETS +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@test_table4 +POSTHOOK: query: CREATE TABLE test_table4 (key INT, key2 INT, value STRING) PARTITIONED BY (ds STRING) +CLUSTERED BY (key, key2) SORTED BY (key DESC, key2 DESC) INTO 2 BUCKETS +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@test_table4 +PREHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table +-- This should be a map-reduce operation +EXPLAIN +INSERT OVERWRITE TABLE test_table4 PARTITION (ds = '1') +SELECT subq2.k2, subq2.k1, subq2.value from +( +SELECT subq1.key2 as k1, subq1.key as k2, subq1.value from +( +SELECT a.key, a.key2, concat(a.value, b.value) as value +FROM test_table1 a JOIN test_table2 b +ON a.key = b.key and a.key2 = b.key2 WHERE a.ds = '1' and b.ds = '1' +)subq1 +)subq2 +PREHOOK: type: QUERY +POSTHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table +-- This should be a map-reduce operation 
+EXPLAIN +INSERT OVERWRITE TABLE test_table4 PARTITION (ds = '1') +SELECT subq2.k2, subq2.k1, subq2.value from +( +SELECT subq1.key2 as k1, subq1.key as k2, subq1.value from +( +SELECT a.key, a.key2, concat(a.value, b.value) as value +FROM test_table1 a JOIN test_table2 b +ON a.key = b.key and a.key2 = b.key2 WHERE a.ds = '1' and b.ds = '1' +)subq1 +)subq2 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + Stage-2 depends on stages: Stage-0 + +STAGE PLANS: + Stage: Stage-1 + Spark + Edges: + Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 1) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: a + Statistics: Num rows: 10 Data size: 91 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (key is not null and key2 is not null) (type: boolean) + Statistics: Num rows: 3 Data size: 27 Basic stats: COMPLETE Column stats: NONE + Sorted Merge Bucket Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 {key} {key2} {value} + 1 {value} + keys: + 0 key (type: int), key2 (type: int) + 1 key (type: int), key2 (type: int) + outputColumnNames: _col0, _col1, _col2, _col9 + Select Operator + expressions: _col0 (type: int), _col1 (type: int), concat(_col2, _col9) (type: string) + outputColumnNames: _col0, _col1, _col2 + Reduce Output Operator + key expressions: _col0 (type: int), _col1 (type: int) + sort order: -- + Map-reduce partition columns: _col0 (type: int), _col1 (type: int) + value expressions: _col0 (type: int), _col1 (type: int), _col2 (type: string) + Reducer 2 + Reduce Operator Tree: + Extract + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.test_table4 + + Stage: Stage-0 + Move Operator + tables: + partition: + ds 1 + replace: true + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.test_table4 + + Stage: Stage-2 + Stats-Aggr Operator + diff --git ql/src/test/results/clientpositive/spark/bucketsortoptimize_insert_7.q.out ql/src/test/results/clientpositive/spark/bucketsortoptimize_insert_7.q.out new file mode 100644 index 0000000..b9ad92d --- /dev/null +++ ql/src/test/results/clientpositive/spark/bucketsortoptimize_insert_7.q.out @@ -0,0 +1,482 @@ +PREHOOK: query: -- Create two bucketed and sorted tables +CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING) +CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@test_table1 +POSTHOOK: query: -- Create two bucketed and sorted tables +CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING) +CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@test_table1 +PREHOOK: query: CREATE TABLE test_table2 (key INT, value STRING) PARTITIONED BY (ds STRING) +CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@test_table2 +POSTHOOK: query: CREATE TABLE test_table2 (key INT, value STRING) PARTITIONED BY (ds STRING) +CLUSTERED BY (key) SORTED BY 
(key) INTO 2 BUCKETS +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@test_table2 +PREHOOK: query: CREATE TABLE test_table3 (key INT, value STRING) PARTITIONED BY (ds STRING) +CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@test_table3 +POSTHOOK: query: CREATE TABLE test_table3 (key INT, value STRING) PARTITIONED BY (ds STRING) +CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@test_table3 +PREHOOK: query: FROM src +INSERT OVERWRITE TABLE test_table1 PARTITION (ds = '1') SELECT * where key < 10 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@test_table1@ds=1 +POSTHOOK: query: FROM src +INSERT OVERWRITE TABLE test_table1 PARTITION (ds = '1') SELECT * where key < 10 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@test_table1@ds=1 +POSTHOOK: Lineage: test_table1 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: test_table1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: FROM src +INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') SELECT * where key < 100 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@test_table2@ds=1 +POSTHOOK: query: FROM src +INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') SELECT * where key < 100 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@test_table2@ds=1 +POSTHOOK: Lineage: test_table2 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: test_table2 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table +-- This should be a map-only operation +EXPLAIN +INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') +SELECT a.key, concat(a.value, b.value) +FROM test_table1 a JOIN test_table2 b +ON a.key = b.key WHERE a.ds = '1' and b.ds = '1' +and (a.key = 0 or a.key = 5) +PREHOOK: type: QUERY +POSTHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table +-- This should be a map-only operation +EXPLAIN +INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') +SELECT a.key, concat(a.value, b.value) +FROM test_table1 a JOIN test_table2 b +ON a.key = b.key WHERE a.ds = '1' and b.ds = '1' +and (a.key = 0 or a.key = 5) +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + Stage-2 depends on stages: Stage-0 + +STAGE PLANS: + Stage: Stage-1 + Spark +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: a + Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (key is not null and ((key = 0) or (key = 5))) (type: boolean) + Statistics: Num rows: 4 Data size: 28 Basic stats: COMPLETE Column stats: NONE + Sorted Merge Bucket Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 {key} {value} + 1 {value} + keys: + 0 key (type: int) + 1 key (type: int) + outputColumnNames: _col0, _col1, _col7 + Select Operator + expressions: _col0 (type: int), concat(_col1, _col7) (type: string) + 
outputColumnNames: _col0, _col1 + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.test_table3 + + Stage: Stage-0 + Move Operator + tables: + partition: + ds 1 + replace: true + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.test_table3 + + Stage: Stage-2 + Stats-Aggr Operator + +PREHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') +SELECT a.key, concat(a.value, b.value) +FROM test_table1 a JOIN test_table2 b +ON a.key = b.key WHERE a.ds = '1' and b.ds = '1' +and (a.key = 0 or a.key = 5) +PREHOOK: type: QUERY +PREHOOK: Input: default@test_table1 +PREHOOK: Input: default@test_table1@ds=1 +PREHOOK: Input: default@test_table2 +PREHOOK: Input: default@test_table2@ds=1 +PREHOOK: Output: default@test_table3@ds=1 +POSTHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') +SELECT a.key, concat(a.value, b.value) +FROM test_table1 a JOIN test_table2 b +ON a.key = b.key WHERE a.ds = '1' and b.ds = '1' +and (a.key = 0 or a.key = 5) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@test_table1 +POSTHOOK: Input: default@test_table1@ds=1 +POSTHOOK: Input: default@test_table2 +POSTHOOK: Input: default@test_table2@ds=1 +POSTHOOK: Output: default@test_table3@ds=1 +POSTHOOK: Lineage: test_table3 PARTITION(ds=1).key SIMPLE [(test_table1)a.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: test_table3 PARTITION(ds=1).value EXPRESSION [(test_table1)a.FieldSchema(name:value, type:string, comment:null), (test_table2)b.FieldSchema(name:value, type:string, comment:null), ] +PREHOOK: query: select * from test_table3 tablesample (bucket 1 out of 2) s where ds = '1' +PREHOOK: type: QUERY +PREHOOK: Input: default@test_table3 +PREHOOK: Input: default@test_table3@ds=1 +#### A masked pattern was here #### +POSTHOOK: query: select * from test_table3 tablesample (bucket 1 out of 2) s where ds = '1' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@test_table3 +POSTHOOK: Input: default@test_table3@ds=1 +#### A masked pattern was here #### +0 val_0val_0 1 +0 val_0val_0 1 +0 val_0val_0 1 +0 val_0val_0 1 +0 val_0val_0 1 +0 val_0val_0 1 +0 val_0val_0 1 +0 val_0val_0 1 +0 val_0val_0 1 +PREHOOK: query: select * from test_table3 tablesample (bucket 2 out of 2) s where ds = '1' +PREHOOK: type: QUERY +PREHOOK: Input: default@test_table3 +PREHOOK: Input: default@test_table3@ds=1 +#### A masked pattern was here #### +POSTHOOK: query: select * from test_table3 tablesample (bucket 2 out of 2) s where ds = '1' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@test_table3 +POSTHOOK: Input: default@test_table3@ds=1 +#### A masked pattern was here #### +5 val_5val_5 1 +5 val_5val_5 1 +5 val_5val_5 1 +5 val_5val_5 1 +5 val_5val_5 1 +5 val_5val_5 1 +5 val_5val_5 1 +5 val_5val_5 1 +5 val_5val_5 1 +PREHOOK: query: -- This should be a map-only job +EXPLAIN +INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') +SELECT a.key, concat(a.value, b.value) +FROM +(select key, value from test_table1 where ds = '1' and (key = 0 or key = 5)) a +JOIN +(select key, value from test_table2 where ds = '1' and (key = 0 or key = 5)) b +ON a.key = b.key +PREHOOK: type: QUERY +POSTHOOK: query: -- This should be a map-only job +EXPLAIN +INSERT 
OVERWRITE TABLE test_table3 PARTITION (ds = '1') +SELECT a.key, concat(a.value, b.value) +FROM +(select key, value from test_table1 where ds = '1' and (key = 0 or key = 5)) a +JOIN +(select key, value from test_table2 where ds = '1' and (key = 0 or key = 5)) b +ON a.key = b.key +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + Stage-2 depends on stages: Stage-0 + +STAGE PLANS: + Stage: Stage-1 + Spark +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: test_table1 + Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (((key = 0) or (key = 5)) and key is not null) (type: boolean) + Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: key (type: int), value (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE + Sorted Merge Bucket Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 {_col0} {_col1} + 1 {_col1} + keys: + 0 _col0 (type: int) + 1 _col0 (type: int) + outputColumnNames: _col0, _col1, _col3 + Select Operator + expressions: _col0 (type: int), concat(_col1, _col3) (type: string) + outputColumnNames: _col0, _col1 + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.test_table3 + + Stage: Stage-0 + Move Operator + tables: + partition: + ds 1 + replace: true + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.test_table3 + + Stage: Stage-2 + Stats-Aggr Operator + +PREHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') +SELECT a.key, concat(a.value, b.value) +FROM +(select key, value from test_table1 where ds = '1' and (key = 0 or key = 5)) a +JOIN +(select key, value from test_table2 where ds = '1' and (key = 0 or key = 5)) b +ON a.key = b.key +PREHOOK: type: QUERY +PREHOOK: Input: default@test_table1 +PREHOOK: Input: default@test_table1@ds=1 +PREHOOK: Input: default@test_table2 +PREHOOK: Input: default@test_table2@ds=1 +PREHOOK: Output: default@test_table3@ds=1 +POSTHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') +SELECT a.key, concat(a.value, b.value) +FROM +(select key, value from test_table1 where ds = '1' and (key = 0 or key = 5)) a +JOIN +(select key, value from test_table2 where ds = '1' and (key = 0 or key = 5)) b +ON a.key = b.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@test_table1 +POSTHOOK: Input: default@test_table1@ds=1 +POSTHOOK: Input: default@test_table2 +POSTHOOK: Input: default@test_table2@ds=1 +POSTHOOK: Output: default@test_table3@ds=1 +POSTHOOK: Lineage: test_table3 PARTITION(ds=1).key SIMPLE [(test_table1)test_table1.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: test_table3 PARTITION(ds=1).value EXPRESSION [(test_table1)test_table1.FieldSchema(name:value, type:string, comment:null), (test_table2)test_table2.FieldSchema(name:value, type:string, comment:null), ] +PREHOOK: query: select * from test_table3 tablesample (bucket 1 out of 2) s where ds = '1' +PREHOOK: type: QUERY +PREHOOK: Input: 
default@test_table3 +PREHOOK: Input: default@test_table3@ds=1 +#### A masked pattern was here #### +POSTHOOK: query: select * from test_table3 tablesample (bucket 1 out of 2) s where ds = '1' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@test_table3 +POSTHOOK: Input: default@test_table3@ds=1 +#### A masked pattern was here #### +0 val_0val_0 1 +0 val_0val_0 1 +0 val_0val_0 1 +0 val_0val_0 1 +0 val_0val_0 1 +0 val_0val_0 1 +0 val_0val_0 1 +0 val_0val_0 1 +0 val_0val_0 1 +PREHOOK: query: select * from test_table3 tablesample (bucket 2 out of 2) s where ds = '1' +PREHOOK: type: QUERY +PREHOOK: Input: default@test_table3 +PREHOOK: Input: default@test_table3@ds=1 +#### A masked pattern was here #### +POSTHOOK: query: select * from test_table3 tablesample (bucket 2 out of 2) s where ds = '1' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@test_table3 +POSTHOOK: Input: default@test_table3@ds=1 +#### A masked pattern was here #### +5 val_5val_5 1 +5 val_5val_5 1 +5 val_5val_5 1 +5 val_5val_5 1 +5 val_5val_5 1 +5 val_5val_5 1 +5 val_5val_5 1 +5 val_5val_5 1 +5 val_5val_5 1 +PREHOOK: query: -- This should be a map-only job +EXPLAIN +INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') +SELECT a.key, concat(a.value, b.value) +FROM +(select key, value from test_table1 where ds = '1' and key < 8) a +JOIN +(select key, value from test_table2 where ds = '1' and key < 8) b +ON a.key = b.key +WHERE a.key = 0 or a.key = 5 +PREHOOK: type: QUERY +POSTHOOK: query: -- This should be a map-only job +EXPLAIN +INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') +SELECT a.key, concat(a.value, b.value) +FROM +(select key, value from test_table1 where ds = '1' and key < 8) a +JOIN +(select key, value from test_table2 where ds = '1' and key < 8) b +ON a.key = b.key +WHERE a.key = 0 or a.key = 5 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + Stage-2 depends on stages: Stage-0 + +STAGE PLANS: + Stage: Stage-1 + Spark +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: test_table1 + Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (((key < 8) and key is not null) and ((key = 0) or (key = 5))) (type: boolean) + Statistics: Num rows: 2 Data size: 14 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: key (type: int), value (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 2 Data size: 14 Basic stats: COMPLETE Column stats: NONE + Sorted Merge Bucket Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 {_col0} {_col1} + 1 {_col1} + keys: + 0 _col0 (type: int) + 1 _col0 (type: int) + outputColumnNames: _col0, _col1, _col3 + Select Operator + expressions: _col0 (type: int), concat(_col1, _col3) (type: string) + outputColumnNames: _col0, _col1 + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.test_table3 + + Stage: Stage-0 + Move Operator + tables: + partition: + ds 1 + replace: true + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.test_table3 + + Stage: Stage-2 + Stats-Aggr Operator + +PREHOOK: 
query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') +SELECT a.key, concat(a.value, b.value) +FROM +(select key, value from test_table1 where ds = '1' and key < 8) a +JOIN +(select key, value from test_table2 where ds = '1' and key < 8) b +ON a.key = b.key +WHERE a.key = 0 or a.key = 5 +PREHOOK: type: QUERY +PREHOOK: Input: default@test_table1 +PREHOOK: Input: default@test_table1@ds=1 +PREHOOK: Input: default@test_table2 +PREHOOK: Input: default@test_table2@ds=1 +PREHOOK: Output: default@test_table3@ds=1 +POSTHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') +SELECT a.key, concat(a.value, b.value) +FROM +(select key, value from test_table1 where ds = '1' and key < 8) a +JOIN +(select key, value from test_table2 where ds = '1' and key < 8) b +ON a.key = b.key +WHERE a.key = 0 or a.key = 5 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@test_table1 +POSTHOOK: Input: default@test_table1@ds=1 +POSTHOOK: Input: default@test_table2 +POSTHOOK: Input: default@test_table2@ds=1 +POSTHOOK: Output: default@test_table3@ds=1 +POSTHOOK: Lineage: test_table3 PARTITION(ds=1).key SIMPLE [(test_table1)test_table1.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: test_table3 PARTITION(ds=1).value EXPRESSION [(test_table1)test_table1.FieldSchema(name:value, type:string, comment:null), (test_table2)test_table2.FieldSchema(name:value, type:string, comment:null), ] +PREHOOK: query: select * from test_table3 tablesample (bucket 1 out of 2) s where ds = '1' +PREHOOK: type: QUERY +PREHOOK: Input: default@test_table3 +PREHOOK: Input: default@test_table3@ds=1 +#### A masked pattern was here #### +POSTHOOK: query: select * from test_table3 tablesample (bucket 1 out of 2) s where ds = '1' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@test_table3 +POSTHOOK: Input: default@test_table3@ds=1 +#### A masked pattern was here #### +0 val_0val_0 1 +0 val_0val_0 1 +0 val_0val_0 1 +0 val_0val_0 1 +0 val_0val_0 1 +0 val_0val_0 1 +0 val_0val_0 1 +0 val_0val_0 1 +0 val_0val_0 1 +PREHOOK: query: select * from test_table3 tablesample (bucket 2 out of 2) s where ds = '1' +PREHOOK: type: QUERY +PREHOOK: Input: default@test_table3 +PREHOOK: Input: default@test_table3@ds=1 +#### A masked pattern was here #### +POSTHOOK: query: select * from test_table3 tablesample (bucket 2 out of 2) s where ds = '1' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@test_table3 +POSTHOOK: Input: default@test_table3@ds=1 +#### A masked pattern was here #### +5 val_5val_5 1 +5 val_5val_5 1 +5 val_5val_5 1 +5 val_5val_5 1 +5 val_5val_5 1 +5 val_5val_5 1 +5 val_5val_5 1 +5 val_5val_5 1 +5 val_5val_5 1 diff --git ql/src/test/results/clientpositive/spark/bucketsortoptimize_insert_8.q.out ql/src/test/results/clientpositive/spark/bucketsortoptimize_insert_8.q.out new file mode 100644 index 0000000..ed4d03f --- /dev/null +++ ql/src/test/results/clientpositive/spark/bucketsortoptimize_insert_8.q.out @@ -0,0 +1,326 @@ +PREHOOK: query: -- Create two bucketed and sorted tables +CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING) +CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@test_table1 +POSTHOOK: query: -- Create two bucketed and sorted tables +CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING) +CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@test_table1 +PREHOOK: query: CREATE TABLE 
test_table2 (key INT, value STRING) PARTITIONED BY (ds STRING) +CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@test_table2 +POSTHOOK: query: CREATE TABLE test_table2 (key INT, value STRING) PARTITIONED BY (ds STRING) +CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@test_table2 +PREHOOK: query: CREATE TABLE test_table3 (key INT, key2 INT, value STRING) PARTITIONED BY (ds STRING) +CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@test_table3 +POSTHOOK: query: CREATE TABLE test_table3 (key INT, key2 INT, value STRING) PARTITIONED BY (ds STRING) +CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@test_table3 +PREHOOK: query: FROM src +INSERT OVERWRITE TABLE test_table1 PARTITION (ds = '1') SELECT * where key < 10 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@test_table1@ds=1 +POSTHOOK: query: FROM src +INSERT OVERWRITE TABLE test_table1 PARTITION (ds = '1') SELECT * where key < 10 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@test_table1@ds=1 +POSTHOOK: Lineage: test_table1 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: test_table1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: FROM src +INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') SELECT * where key < 100 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@test_table2@ds=1 +POSTHOOK: query: FROM src +INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') SELECT * where key < 100 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@test_table2@ds=1 +POSTHOOK: Lineage: test_table2 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: test_table2 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table +-- This should be a map-only operation +EXPLAIN +INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') +SELECT a.key, b.key, concat(a.value, b.value) +FROM test_table1 a JOIN test_table2 b +ON a.key = b.key WHERE a.ds = '1' and b.ds = '1' +PREHOOK: type: QUERY +POSTHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table +-- This should be a map-only operation +EXPLAIN +INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') +SELECT a.key, b.key, concat(a.value, b.value) +FROM test_table1 a JOIN test_table2 b +ON a.key = b.key WHERE a.ds = '1' and b.ds = '1' +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + Stage-2 depends on stages: Stage-0 + +STAGE PLANS: + Stage: Stage-1 + Spark +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: a + Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: key is not null (type: boolean) + Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE + Sorted Merge Bucket Map Join 
Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 {key} {value} + 1 {key} {value} + keys: + 0 key (type: int) + 1 key (type: int) + outputColumnNames: _col0, _col1, _col6, _col7 + Select Operator + expressions: _col0 (type: int), _col6 (type: int), concat(_col1, _col7) (type: string) + outputColumnNames: _col0, _col1, _col2 + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.test_table3 + + Stage: Stage-0 + Move Operator + tables: + partition: + ds 1 + replace: true + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.test_table3 + + Stage: Stage-2 + Stats-Aggr Operator + +PREHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') +SELECT a.key, b.key, concat(a.value, b.value) +FROM test_table1 a JOIN test_table2 b +ON a.key = b.key WHERE a.ds = '1' and b.ds = '1' +PREHOOK: type: QUERY +PREHOOK: Input: default@test_table1 +PREHOOK: Input: default@test_table1@ds=1 +PREHOOK: Input: default@test_table2 +PREHOOK: Input: default@test_table2@ds=1 +PREHOOK: Output: default@test_table3@ds=1 +POSTHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') +SELECT a.key, b.key, concat(a.value, b.value) +FROM test_table1 a JOIN test_table2 b +ON a.key = b.key WHERE a.ds = '1' and b.ds = '1' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@test_table1 +POSTHOOK: Input: default@test_table1@ds=1 +POSTHOOK: Input: default@test_table2 +POSTHOOK: Input: default@test_table2@ds=1 +POSTHOOK: Output: default@test_table3@ds=1 +POSTHOOK: Lineage: test_table3 PARTITION(ds=1).key SIMPLE [(test_table1)a.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: test_table3 PARTITION(ds=1).key2 SIMPLE [(test_table2)b.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: test_table3 PARTITION(ds=1).value EXPRESSION [(test_table1)a.FieldSchema(name:value, type:string, comment:null), (test_table2)b.FieldSchema(name:value, type:string, comment:null), ] +PREHOOK: query: select * from test_table3 tablesample (bucket 1 out of 2) s where ds = '1' +PREHOOK: type: QUERY +PREHOOK: Input: default@test_table3 +PREHOOK: Input: default@test_table3@ds=1 +#### A masked pattern was here #### +POSTHOOK: query: select * from test_table3 tablesample (bucket 1 out of 2) s where ds = '1' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@test_table3 +POSTHOOK: Input: default@test_table3@ds=1 +#### A masked pattern was here #### +0 0 val_0val_0 1 +0 0 val_0val_0 1 +0 0 val_0val_0 1 +0 0 val_0val_0 1 +0 0 val_0val_0 1 +0 0 val_0val_0 1 +0 0 val_0val_0 1 +0 0 val_0val_0 1 +0 0 val_0val_0 1 +2 2 val_2val_2 1 +4 4 val_4val_4 1 +8 8 val_8val_8 1 +PREHOOK: query: select * from test_table3 tablesample (bucket 2 out of 2) s where ds = '1' +PREHOOK: type: QUERY +PREHOOK: Input: default@test_table3 +PREHOOK: Input: default@test_table3@ds=1 +#### A masked pattern was here #### +POSTHOOK: query: select * from test_table3 tablesample (bucket 2 out of 2) s where ds = '1' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@test_table3 +POSTHOOK: Input: default@test_table3@ds=1 +#### A masked pattern was here #### +5 5 val_5val_5 1 +5 5 val_5val_5 1 +5 5 val_5val_5 1 +5 5 val_5val_5 1 +5 5 val_5val_5 1 +5 5 val_5val_5 1 +5 
5 val_5val_5 1 +5 5 val_5val_5 1 +5 5 val_5val_5 1 +9 9 val_9val_9 1 +PREHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table +-- This should be a map-only operation +EXPLAIN +INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') +SELECT b.key, a.key, concat(a.value, b.value) +FROM test_table1 a JOIN test_table2 b +ON a.key = b.key WHERE a.ds = '1' and b.ds = '1' +PREHOOK: type: QUERY +POSTHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table +-- This should be a map-only operation +EXPLAIN +INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') +SELECT b.key, a.key, concat(a.value, b.value) +FROM test_table1 a JOIN test_table2 b +ON a.key = b.key WHERE a.ds = '1' and b.ds = '1' +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + Stage-2 depends on stages: Stage-0 + +STAGE PLANS: + Stage: Stage-1 + Spark +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: a + Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: key is not null (type: boolean) + Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE + Sorted Merge Bucket Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 {key} {value} + 1 {key} {value} + keys: + 0 key (type: int) + 1 key (type: int) + outputColumnNames: _col0, _col1, _col6, _col7 + Select Operator + expressions: _col6 (type: int), _col0 (type: int), concat(_col1, _col7) (type: string) + outputColumnNames: _col0, _col1, _col2 + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.test_table3 + + Stage: Stage-0 + Move Operator + tables: + partition: + ds 1 + replace: true + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.test_table3 + + Stage: Stage-2 + Stats-Aggr Operator + +PREHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') +SELECT b.key, a.key, concat(a.value, b.value) +FROM test_table1 a JOIN test_table2 b +ON a.key = b.key WHERE a.ds = '1' and b.ds = '1' +PREHOOK: type: QUERY +PREHOOK: Input: default@test_table1 +PREHOOK: Input: default@test_table1@ds=1 +PREHOOK: Input: default@test_table2 +PREHOOK: Input: default@test_table2@ds=1 +PREHOOK: Output: default@test_table3@ds=1 +POSTHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') +SELECT b.key, a.key, concat(a.value, b.value) +FROM test_table1 a JOIN test_table2 b +ON a.key = b.key WHERE a.ds = '1' and b.ds = '1' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@test_table1 +POSTHOOK: Input: default@test_table1@ds=1 +POSTHOOK: Input: default@test_table2 +POSTHOOK: Input: default@test_table2@ds=1 +POSTHOOK: Output: default@test_table3@ds=1 +POSTHOOK: Lineage: test_table3 PARTITION(ds=1).key SIMPLE [(test_table2)b.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: test_table3 PARTITION(ds=1).key2 SIMPLE [(test_table1)a.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: test_table3 PARTITION(ds=1).value EXPRESSION [(test_table1)a.FieldSchema(name:value, type:string, comment:null), 
(test_table2)b.FieldSchema(name:value, type:string, comment:null), ] +PREHOOK: query: select * from test_table3 tablesample (bucket 1 out of 2) s where ds = '1' +PREHOOK: type: QUERY +PREHOOK: Input: default@test_table3 +PREHOOK: Input: default@test_table3@ds=1 +#### A masked pattern was here #### +POSTHOOK: query: select * from test_table3 tablesample (bucket 1 out of 2) s where ds = '1' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@test_table3 +POSTHOOK: Input: default@test_table3@ds=1 +#### A masked pattern was here #### +0 0 val_0val_0 1 +0 0 val_0val_0 1 +0 0 val_0val_0 1 +0 0 val_0val_0 1 +0 0 val_0val_0 1 +0 0 val_0val_0 1 +0 0 val_0val_0 1 +0 0 val_0val_0 1 +0 0 val_0val_0 1 +2 2 val_2val_2 1 +4 4 val_4val_4 1 +8 8 val_8val_8 1 +PREHOOK: query: select * from test_table3 tablesample (bucket 2 out of 2) s where ds = '1' +PREHOOK: type: QUERY +PREHOOK: Input: default@test_table3 +PREHOOK: Input: default@test_table3@ds=1 +#### A masked pattern was here #### +POSTHOOK: query: select * from test_table3 tablesample (bucket 2 out of 2) s where ds = '1' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@test_table3 +POSTHOOK: Input: default@test_table3@ds=1 +#### A masked pattern was here #### +5 5 val_5val_5 1 +5 5 val_5val_5 1 +5 5 val_5val_5 1 +5 5 val_5val_5 1 +5 5 val_5val_5 1 +5 5 val_5val_5 1 +5 5 val_5val_5 1 +5 5 val_5val_5 1 +5 5 val_5val_5 1 +9 9 val_9val_9 1
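
For context on what these golden files pin down, here is a minimal HiveQL sketch of the pattern the bucketsortoptimize_insert_* tests exercise. The DDL and the INSERT mirror the queries in the hunks above; the set statements are assumptions, since the driving .q sources are not included in this diff (they are the conventional knobs for converting a bucketed, sorted join into a sorted-merge bucket map join).

  -- Assumed session settings; not confirmed by this diff:
  set hive.optimize.bucketmapjoin=true;
  set hive.optimize.bucketmapjoin.sortedmerge=true;
  set hive.auto.convert.sortmerge.join=true;
  set hive.enforce.bucketing=true;
  set hive.enforce.sorting=true;

  -- Both join inputs and the insert target are bucketed and sorted on the
  -- same key, into the same number of buckets.
  CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING)
  CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS;
  CREATE TABLE test_table2 (key INT, value STRING) PARTITIONED BY (ds STRING)
  CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS;
  CREATE TABLE test_table3 (key INT, value STRING) PARTITIONED BY (ds STRING)
  CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS;

  -- Because bucketing and sort order line up from the join inputs through to
  -- the insert target, the plan keeps a Sorted Merge Bucket Map Join and
  -- writes the output directly: one map-only Spark stage with no Reducer
  -- vertex, as in the first EXPLAIN of bucketsortoptimize_insert_7.q.out
  -- above. Selecting the keys in a different order (e.g. key2, key) breaks
  -- the alignment and adds a PARTITION-LEVEL SORT edge into a reducer, as in
  -- the map-reduce plans above.
  INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
  SELECT a.key, concat(a.value, b.value)
  FROM test_table1 a JOIN test_table2 b
  ON a.key = b.key WHERE a.ds = '1' AND b.ds = '1';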