diff --git accumulo-handler/src/test/results/positive/accumulo_joins.q.out accumulo-handler/src/test/results/positive/accumulo_joins.q.out deleted file mode 100644 index ed65e08998..0000000000 --- accumulo-handler/src/test/results/positive/accumulo_joins.q.out +++ /dev/null @@ -1,284 +0,0 @@ -PREHOOK: query: DROP TABLE users -PREHOOK: type: DROPTABLE -POSTHOOK: query: DROP TABLE users -POSTHOOK: type: DROPTABLE -PREHOOK: query: DROP TABLE states -PREHOOK: type: DROPTABLE -POSTHOOK: query: DROP TABLE states -POSTHOOK: type: DROPTABLE -PREHOOK: query: DROP TABLE countries -PREHOOK: type: DROPTABLE -POSTHOOK: query: DROP TABLE countries -POSTHOOK: type: DROPTABLE -PREHOOK: query: DROP TABLE users_level -PREHOOK: type: DROPTABLE -POSTHOOK: query: DROP TABLE users_level -POSTHOOK: type: DROPTABLE -PREHOOK: query: CREATE EXTERNAL TABLE users(key string, state string, country string, country_id int) -STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler' -WITH SERDEPROPERTIES ( -"accumulo.columns.mapping" = ":rowID,info:state,info:country,info:country_id" -) -TBLPROPERTIES ("external.table.purge" = "true") -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@users -POSTHOOK: query: CREATE EXTERNAL TABLE users(key string, state string, country string, country_id int) -STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler' -WITH SERDEPROPERTIES ( -"accumulo.columns.mapping" = ":rowID,info:state,info:country,info:country_id" -) -TBLPROPERTIES ("external.table.purge" = "true") -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@users -PREHOOK: query: CREATE EXTERNAL TABLE states(key string, name string) -STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler' -WITH SERDEPROPERTIES ( -"accumulo.columns.mapping" = ":rowID,state:name" -) -TBLPROPERTIES ("external.table.purge" = "true") -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@states -POSTHOOK: query: CREATE EXTERNAL TABLE states(key string, name string) -STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler' -WITH SERDEPROPERTIES ( -"accumulo.columns.mapping" = ":rowID,state:name" -) -TBLPROPERTIES ("external.table.purge" = "true") -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@states -PREHOOK: query: CREATE EXTERNAL TABLE countries(key string, name string, country string, country_id int) -STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler' -WITH SERDEPROPERTIES ( -"accumulo.columns.mapping" = ":rowID,info:name,info:country,info:country_id" -) -TBLPROPERTIES ("external.table.purge" = "true") -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@countries -POSTHOOK: query: CREATE EXTERNAL TABLE countries(key string, name string, country string, country_id int) -STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler' -WITH SERDEPROPERTIES ( -"accumulo.columns.mapping" = ":rowID,info:name,info:country,info:country_id" -) -TBLPROPERTIES ("external.table.purge" = "true") -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@countries -PREHOOK: query: INSERT OVERWRITE TABLE users SELECT 'user1', 'IA', 'USA', 0 -FROM src WHERE key=100 -PREHOOK: type: QUERY -PREHOOK: Input: default@src -PREHOOK: Output: default@users -POSTHOOK: query: INSERT OVERWRITE TABLE users SELECT 'user1', 'IA', 'USA', 0 -FROM src WHERE key=100 -POSTHOOK: type: QUERY -POSTHOOK: 
Input: default@src -POSTHOOK: Output: default@users -PREHOOK: query: INSERT OVERWRITE TABLE states SELECT 'IA', 'Iowa' -FROM src WHERE key=100 -PREHOOK: type: QUERY -PREHOOK: Input: default@src -PREHOOK: Output: default@states -POSTHOOK: query: INSERT OVERWRITE TABLE states SELECT 'IA', 'Iowa' -FROM src WHERE key=100 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src -POSTHOOK: Output: default@states -PREHOOK: query: INSERT OVERWRITE TABLE countries SELECT 'USA', 'United States', 'USA', 1 -FROM src WHERE key=100 -PREHOOK: type: QUERY -PREHOOK: Input: default@src -PREHOOK: Output: default@countries -POSTHOOK: query: INSERT OVERWRITE TABLE countries SELECT 'USA', 'United States', 'USA', 1 -FROM src WHERE key=100 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src -POSTHOOK: Output: default@countries -PREHOOK: query: SELECT u.key, u.country, c.name, c.key FROM users u JOIN countries c -ON (u.country = c.key) -PREHOOK: type: QUERY -PREHOOK: Input: default@countries -PREHOOK: Input: default@users -#### A masked pattern was here #### -POSTHOOK: query: SELECT u.key, u.country, c.name, c.key FROM users u JOIN countries c -ON (u.country = c.key) -POSTHOOK: type: QUERY -POSTHOOK: Input: default@countries -POSTHOOK: Input: default@users -#### A masked pattern was here #### -user1 USA United States USA -PREHOOK: query: SELECT u.key, u.country, c.name, c.key FROM users u JOIN countries c -ON (u.country = c.country) -PREHOOK: type: QUERY -PREHOOK: Input: default@countries -PREHOOK: Input: default@users -#### A masked pattern was here #### -POSTHOOK: query: SELECT u.key, u.country, c.name, c.key FROM users u JOIN countries c -ON (u.country = c.country) -POSTHOOK: type: QUERY -POSTHOOK: Input: default@countries -POSTHOOK: Input: default@users -#### A masked pattern was here #### -user1 USA United States USA -PREHOOK: query: SELECT u.key, u.country, c.name, c.key FROM users u JOIN countries c -ON (u.country_id = c.country_id) -PREHOOK: type: QUERY -PREHOOK: Input: default@countries -PREHOOK: Input: default@users -#### A masked pattern was here #### -POSTHOOK: query: SELECT u.key, u.country, c.name, c.key FROM users u JOIN countries c -ON (u.country_id = c.country_id) -POSTHOOK: type: QUERY -POSTHOOK: Input: default@countries -POSTHOOK: Input: default@users -#### A masked pattern was here #### -PREHOOK: query: SELECT u.key, u.state, s.name FROM users u JOIN states s -ON (u.state = s.key) -PREHOOK: type: QUERY -PREHOOK: Input: default@states -PREHOOK: Input: default@users -#### A masked pattern was here #### -POSTHOOK: query: SELECT u.key, u.state, s.name FROM users u JOIN states s -ON (u.state = s.key) -POSTHOOK: type: QUERY -POSTHOOK: Input: default@states -POSTHOOK: Input: default@users -#### A masked pattern was here #### -user1 IA Iowa -PREHOOK: query: SELECT u.key, u.country, c.name, c.key FROM users u JOIN countries c -ON (u.country = c.key) -PREHOOK: type: QUERY -PREHOOK: Input: default@countries -PREHOOK: Input: default@users -#### A masked pattern was here #### -POSTHOOK: query: SELECT u.key, u.country, c.name, c.key FROM users u JOIN countries c -ON (u.country = c.key) -POSTHOOK: type: QUERY -POSTHOOK: Input: default@countries -POSTHOOK: Input: default@users -#### A masked pattern was here #### -user1 USA United States USA -PREHOOK: query: SELECT u.key, u.country, c.name, c.key FROM users u JOIN countries c -ON (u.country = c.country) -PREHOOK: type: QUERY -PREHOOK: Input: default@countries -PREHOOK: Input: default@users -#### A masked pattern was here #### -POSTHOOK: query: SELECT u.key, 
u.country, c.name, c.key FROM users u JOIN countries c -ON (u.country = c.country) -POSTHOOK: type: QUERY -POSTHOOK: Input: default@countries -POSTHOOK: Input: default@users -#### A masked pattern was here #### -user1 USA United States USA -PREHOOK: query: SELECT u.key, u.country, c.name, c.key FROM users u JOIN countries c -ON (u.country_id = c.country_id) -PREHOOK: type: QUERY -PREHOOK: Input: default@countries -PREHOOK: Input: default@users -#### A masked pattern was here #### -POSTHOOK: query: SELECT u.key, u.country, c.name, c.key FROM users u JOIN countries c -ON (u.country_id = c.country_id) -POSTHOOK: type: QUERY -POSTHOOK: Input: default@countries -POSTHOOK: Input: default@users -#### A masked pattern was here #### -PREHOOK: query: SELECT u.key, u.state, s.name FROM users u JOIN states s -ON (u.state = s.key) -PREHOOK: type: QUERY -PREHOOK: Input: default@states -PREHOOK: Input: default@users -#### A masked pattern was here #### -POSTHOOK: query: SELECT u.key, u.state, s.name FROM users u JOIN states s -ON (u.state = s.key) -POSTHOOK: type: QUERY -POSTHOOK: Input: default@states -POSTHOOK: Input: default@users -#### A masked pattern was here #### -user1 IA Iowa -PREHOOK: query: DROP TABLE users -PREHOOK: type: DROPTABLE -PREHOOK: Input: default@users -PREHOOK: Output: default@users -POSTHOOK: query: DROP TABLE users -POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@users -POSTHOOK: Output: default@users -PREHOOK: query: DROP TABLE states -PREHOOK: type: DROPTABLE -PREHOOK: Input: default@states -PREHOOK: Output: default@states -POSTHOOK: query: DROP TABLE states -POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@states -POSTHOOK: Output: default@states -PREHOOK: query: DROP TABLE countries -PREHOOK: type: DROPTABLE -PREHOOK: Input: default@countries -PREHOOK: Output: default@countries -POSTHOOK: query: DROP TABLE countries -POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@countries -POSTHOOK: Output: default@countries -PREHOOK: query: CREATE EXTERNAL TABLE users(key int, userid int, username string, created int) -STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler' -WITH SERDEPROPERTIES ("accumulo.columns.mapping" = ":rowID,f:userid,f:nickname,f:created") -TBLPROPERTIES ("external.table.purge" = "true") -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@users -POSTHOOK: query: CREATE EXTERNAL TABLE users(key int, userid int, username string, created int) -STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler' -WITH SERDEPROPERTIES ("accumulo.columns.mapping" = ":rowID,f:userid,f:nickname,f:created") -TBLPROPERTIES ("external.table.purge" = "true") -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@users -PREHOOK: query: CREATE EXTERNAL TABLE users_level(key int, userid int, level int) -STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler' -WITH SERDEPROPERTIES ("accumulo.columns.mapping" = ":rowID,f:userid,f:level") -TBLPROPERTIES ("external.table.purge" = "true") -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@users_level -POSTHOOK: query: CREATE EXTERNAL TABLE users_level(key int, userid int, level int) -STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler' -WITH SERDEPROPERTIES ("accumulo.columns.mapping" = ":rowID,f:userid,f:level") -TBLPROPERTIES ("external.table.purge" = "true") -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@users_level -PREHOOK: query: 
SELECT year(from_unixtime(users.created)) AS year, level, count(users.userid) AS num - FROM users JOIN users_level ON (users.userid = users_level.userid) - GROUP BY year(from_unixtime(users.created)), level -PREHOOK: type: QUERY -PREHOOK: Input: default@users -PREHOOK: Input: default@users_level -#### A masked pattern was here #### -POSTHOOK: query: SELECT year(from_unixtime(users.created)) AS year, level, count(users.userid) AS num - FROM users JOIN users_level ON (users.userid = users_level.userid) - GROUP BY year(from_unixtime(users.created)), level -POSTHOOK: type: QUERY -POSTHOOK: Input: default@users -POSTHOOK: Input: default@users_level -#### A masked pattern was here #### -PREHOOK: query: DROP TABLE users -PREHOOK: type: DROPTABLE -PREHOOK: Input: default@users -PREHOOK: Output: default@users -POSTHOOK: query: DROP TABLE users -POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@users -POSTHOOK: Output: default@users -PREHOOK: query: DROP TABLE users_level -PREHOOK: type: DROPTABLE -PREHOOK: Input: default@users_level -PREHOOK: Output: default@users_level -POSTHOOK: query: DROP TABLE users_level -POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@users_level -POSTHOOK: Output: default@users_level diff --git hbase-handler/src/test/results/negative/cascade_dbdrop_hadoop20.q.out hbase-handler/src/test/results/negative/cascade_dbdrop_hadoop20.q.out deleted file mode 100644 index d4e29171cc..0000000000 --- hbase-handler/src/test/results/negative/cascade_dbdrop_hadoop20.q.out +++ /dev/null @@ -1,51 +0,0 @@ -PREHOOK: query: CREATE DATABASE hbaseDB -PREHOOK: type: CREATEDATABASE -POSTHOOK: query: CREATE DATABASE hbaseDB -POSTHOOK: type: CREATEDATABASE -PREHOOK: query: -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.20, 0.20S) --- Hadoop 0.23 changes the behavior FsShell on Exit Codes --- In Hadoop 0.20 --- Exit Code == 0 on success --- Exit code < 0 on any failure --- In Hadoop 0.23 --- Exit Code == 0 on success --- Exit Code < 0 on syntax/usage error --- Exit Code > 0 operation failed - -CREATE TABLE hbaseDB.hbase_table_0(key int, value string) -STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler' -WITH SERDEPROPERTIES ("hbase.columns.mapping" = ":key,cf:string") -TBLPROPERTIES ("hbase.table.name" = "hbase_table_0") -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:hbasedb -POSTHOOK: query: -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.20, 0.20S) --- Hadoop 0.23 changes the behavior FsShell on Exit Codes --- In Hadoop 0.20 --- Exit Code == 0 on success --- Exit code < 0 on any failure --- In Hadoop 0.23 --- Exit Code == 0 on success --- Exit Code < 0 on syntax/usage error --- Exit Code > 0 operation failed - -CREATE TABLE hbaseDB.hbase_table_0(key int, value string) -STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler' -WITH SERDEPROPERTIES ("hbase.columns.mapping" = ":key,cf:string") -TBLPROPERTIES ("hbase.table.name" = "hbase_table_0") -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:hbasedb -POSTHOOK: Output: hbaseDB@hbase_table_0 -Found 3 items -#### A masked pattern was here #### -PREHOOK: query: DROP DATABASE IF EXISTS hbaseDB CASCADE -PREHOOK: type: DROPDATABASE -PREHOOK: Input: database:hbasedb -PREHOOK: Output: database:hbasedb -PREHOOK: Output: hbasedb@hbase_table_0 -POSTHOOK: query: DROP DATABASE IF EXISTS hbaseDB CASCADE -POSTHOOK: type: DROPDATABASE -POSTHOOK: Input: database:hbasedb -POSTHOOK: Output: database:hbasedb -POSTHOOK: Output: hbasedb@hbase_table_0 -Command failed with exit code = -1 -Query returned non-zero code: -1, cause: null diff --git 
itests/util/src/test/java/org/apache/hadoop/hive/cli/control/TestDanglingQOuts.java itests/util/src/test/java/org/apache/hadoop/hive/cli/control/TestDanglingQOuts.java index 33caeb15ef..34a2e3a6be 100644 --- itests/util/src/test/java/org/apache/hadoop/hive/cli/control/TestDanglingQOuts.java +++ itests/util/src/test/java/org/apache/hadoop/hive/cli/control/TestDanglingQOuts.java @@ -67,27 +67,34 @@ public TestDanglingQOuts() throws Exception { if (clz == CliConfigs.DummyConfig.class) { continue; } - AbstractCliConfig config = (AbstractCliConfig) clz.newInstance(); - Set qfiles = config.getQueryFiles(); - for (File file : qfiles) { - String baseName = file.getName(); - String rd = config.getResultsDir(); - File of = new File(rd, baseName + ".out"); - if (outsNeeded.containsKey(of)) { - System.err.printf("duplicate: [%s;%s] %s\n", config.getClass().getSimpleName(), outsNeeded.get(of).getClass().getSimpleName(), of); - // throw new RuntimeException("duplicate?!"); - } - outsNeeded.put(of, config); + if (clz == CliConfigs.TezPerfCliConfig.class) { + handleCliConfig(new CliConfigs.TezPerfCliConfig(true)); + handleCliConfig(new CliConfigs.TezPerfCliConfig(false)); + } else { + handleCliConfig((AbstractCliConfig) clz.newInstance()); } + } + } - File od = new File(config.getResultsDir()); - for (File file : od.listFiles(new QOutFilter())) { - outsFound.add(file); + private void handleCliConfig(AbstractCliConfig config) throws Exception { + Set qfiles = config.getQueryFiles(); + for (File file : qfiles) { + String baseName = file.getName(); + String rd = config.getResultsDir(); + File of = new File(rd, baseName + ".out"); + if (outsNeeded.containsKey(of)) { + System.err.printf("duplicate: [%s;%s] %s\n", config.getClass().getSimpleName(), outsNeeded.get(of).getClass().getSimpleName(), of); + // throw new RuntimeException("duplicate?!"); } + outsNeeded.put(of, config); } - } - @Ignore("Disabling till HIVE-19509 gets solved") + File od = new File(config.getResultsDir()); + for (File file : od.listFiles(new QOutFilter())) { + outsFound.add(file); + } + } + @Test public void checkDanglingQOut() { SetView dangling = Sets.difference(outsFound, outsNeeded.keySet()); diff --git ql/src/test/results/clientnegative/authorization_kill_query.q.out ql/src/test/results/clientnegative/authorization_kill_query.q.out deleted file mode 100644 index d135f34ab7..0000000000 --- ql/src/test/results/clientnegative/authorization_kill_query.q.out +++ /dev/null @@ -1,36 +0,0 @@ -PREHOOK: query: set role ADMIN -PREHOOK: type: SHOW_ROLES -POSTHOOK: query: set role ADMIN -POSTHOOK: type: SHOW_ROLES -PREHOOK: query: explain authorization kill query 'dummyqueryid' -PREHOOK: type: KILL QUERY -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: explain authorization kill query 'dummyqueryid' -POSTHOOK: type: KILL QUERY -INPUTS: -OUTPUTS: - dummyHostnameForTest -CURRENT_USER: - hive_admin_user -OPERATION: - KILL_QUERY -PREHOOK: query: kill query 'dummyqueryid' -PREHOOK: type: KILL QUERY -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: kill query 'dummyqueryid' -POSTHOOK: type: KILL QUERY -PREHOOK: query: explain authorization kill query 'dummyqueryid' -PREHOOK: type: KILL QUERY -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: explain authorization kill query 'dummyqueryid' -POSTHOOK: type: KILL QUERY -INPUTS: -OUTPUTS: - dummyHostnameForTest -CURRENT_USER: - ruser1 -OPERATION: - KILL_QUERY -AUTHORIZATION_FAILURES: - Permission denied: Principal [name=ruser1, type=USER] does not have following privileges for 
operation KILL_QUERY [ADMIN PRIVILEGE on INPUT, ADMIN PRIVILEGE on OUTPUT] -FAILED: HiveAccessControlException Permission denied: Principal [name=ruser1, type=USER] does not have following privileges for operation KILL_QUERY [ADMIN PRIVILEGE on INPUT, ADMIN PRIVILEGE on OUTPUT] diff --git ql/src/test/results/clientnegative/autolocal1.q.out ql/src/test/results/clientnegative/autolocal1.q.out deleted file mode 100644 index 7b2ea76dd9..0000000000 --- ql/src/test/results/clientnegative/autolocal1.q.out +++ /dev/null @@ -1,16 +0,0 @@ -PREHOOK: query: -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.20) --- hadoop0.23 changes the behavior of JobClient initialization --- in hadoop0.20, JobClient initialization tries to get JobTracker's address --- this throws the expected IllegalArgumentException --- in hadoop0.23, JobClient initialization only initializes cluster --- and get user group information --- not attempts to get JobTracker's address --- no IllegalArgumentException thrown in JobClient Initialization --- an exception is thrown when JobClient submitJob - -SELECT key FROM src -PREHOOK: type: QUERY -PREHOOK: Input: default@src -#### A masked pattern was here #### -Job Submission failed with exception 'java.lang.RuntimeException(Not a host:port pair: abracadabra)' -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.mr.MapRedTask diff --git ql/src/test/results/clientnegative/distinct_missing_groupby.q.out ql/src/test/results/clientnegative/distinct_missing_groupby.q.out deleted file mode 100644 index ec369763c2..0000000000 --- ql/src/test/results/clientnegative/distinct_missing_groupby.q.out +++ /dev/null @@ -1 +0,0 @@ -FAILED: SemanticException [Error 10025]: Line 2:16 Expression not in GROUP BY key 'key' diff --git ql/src/test/results/clientpositive/auto_join14_hadoop20.q.out ql/src/test/results/clientpositive/auto_join14_hadoop20.q.out deleted file mode 100644 index 4f3e8f79e9..0000000000 --- ql/src/test/results/clientpositive/auto_join14_hadoop20.q.out +++ /dev/null @@ -1,123 +0,0 @@ -PREHOOK: query: -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.20, 0.20S) - -CREATE TABLE dest1(c1 INT, c2 STRING) STORED AS TEXTFILE -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -POSTHOOK: query: -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.20, 0.20S) - -CREATE TABLE dest1(c1 INT, c2 STRING) STORED AS TEXTFILE -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@dest1 -PREHOOK: query: explain -FROM src JOIN srcpart ON src.key = srcpart.key AND srcpart.ds = '2008-04-08' and src.key > 100 -INSERT OVERWRITE TABLE dest1 SELECT src.key, srcpart.value -PREHOOK: type: QUERY -POSTHOOK: query: explain -FROM src JOIN srcpart ON src.key = srcpart.key AND srcpart.ds = '2008-04-08' and src.key > 100 -INSERT OVERWRITE TABLE dest1 SELECT src.key, srcpart.value -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-5 is a root stage - Stage-4 depends on stages: Stage-5 - Stage-0 depends on stages: Stage-4 - Stage-2 depends on stages: Stage-0 - -STAGE PLANS: - Stage: Stage-5 - Map Reduce Local Work - Alias -> Map Local Tables: - src - Fetch Operator - limit: -1 - Alias -> Map Local Operator Tree: - src - TableScan - alias: src - Statistics: Num rows: 58 Data size: 5812 Basic stats: COMPLETE Column stats: NONE - Filter Operator - predicate: ((key > 100) and key is not null) (type: boolean) - Statistics: Num rows: 10 Data size: 1002 Basic stats: COMPLETE Column stats: NONE - HashTable Sink Operator - condition expressions: - 0 - 1 {value} - keys: - 0 key (type: string) - 1 key (type: 
string) - - Stage: Stage-4 - Map Reduce - Map Operator Tree: - TableScan - alias: srcpart - Statistics: Num rows: 58 Data size: 11624 Basic stats: COMPLETE Column stats: NONE - Filter Operator - predicate: ((key > 100) and key is not null) (type: boolean) - Statistics: Num rows: 10 Data size: 2004 Basic stats: COMPLETE Column stats: NONE - Map Join Operator - condition map: - Inner Join 0 to 1 - condition expressions: - 0 {key} - 1 {value} - keys: - 0 key (type: string) - 1 key (type: string) - outputColumnNames: _col0, _col5 - Statistics: Num rows: 11 Data size: 1102 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: UDFToInteger(_col0) (type: int), _col5 (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 11 Data size: 1102 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - Statistics: Num rows: 11 Data size: 1102 Basic stats: COMPLETE Column stats: NONE - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 - Local Work: - Map Reduce Local Work - - Stage: Stage-0 - Move Operator - tables: - replace: true - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 - - Stage: Stage-2 - Stats-Aggr Operator - -PREHOOK: query: FROM src JOIN srcpart ON src.key = srcpart.key AND srcpart.ds = '2008-04-08' and src.key > 100 -INSERT OVERWRITE TABLE dest1 SELECT src.key, srcpart.value -PREHOOK: type: QUERY -PREHOOK: Input: default@src -PREHOOK: Input: default@srcpart -PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 -PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 -PREHOOK: Output: default@dest1 -POSTHOOK: query: FROM src JOIN srcpart ON src.key = srcpart.key AND srcpart.ds = '2008-04-08' and src.key > 100 -INSERT OVERWRITE TABLE dest1 SELECT src.key, srcpart.value -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src -POSTHOOK: Input: default@srcpart -POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 -POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 -POSTHOOK: Output: default@dest1 -POSTHOOK: Lineage: dest1.c1 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.c2 SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: SELECT sum(hash(dest1.c1,dest1.c2)) FROM dest1 -PREHOOK: type: QUERY -PREHOOK: Input: default@dest1 -#### A masked pattern was here #### -POSTHOOK: query: SELECT sum(hash(dest1.c1,dest1.c2)) FROM dest1 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest1 -#### A masked pattern was here #### -404554174174 diff --git ql/src/test/results/clientpositive/combine2_hadoop20.q.out ql/src/test/results/clientpositive/combine2_hadoop20.q.out deleted file mode 100644 index 1d6024eaaa..0000000000 --- ql/src/test/results/clientpositive/combine2_hadoop20.q.out +++ /dev/null @@ -1,723 +0,0 @@ -PREHOOK: query: USE default -PREHOOK: type: SWITCHDATABASE -POSTHOOK: query: USE default -POSTHOOK: type: SWITCHDATABASE -PREHOOK: query: -- EXCLUDE_OS_WINDOWS --- excluded on windows because of difference in file name encoding logic - --- SORT_QUERY_RESULTS - -create table combine2(key string) partitioned by (value string) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -POSTHOOK: 
query: -- EXCLUDE_OS_WINDOWS --- excluded on windows because of difference in file name encoding logic - --- SORT_QUERY_RESULTS - -create table combine2(key string) partitioned by (value string) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@combine2 -PREHOOK: query: -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.20, 0.20S) --- This test sets mapred.max.split.size=256 and hive.merge.smallfiles.avgsize=0 --- in an attempt to force the generation of multiple splits and multiple output files. --- However, Hadoop 0.20 is incapable of generating splits smaller than the block size --- when using CombineFileInputFormat, so only one split is generated. This has a --- significant impact on the results results of this test. --- This issue was fixed in MAPREDUCE-2046 which is included in 0.22. - -insert overwrite table combine2 partition(value) -select * from ( - select key, value from src where key < 10 - union all - select key, '|' as value from src where key = 11 - union all - select key, '2010-04-21 09:45:00' value from src where key = 19) s -PREHOOK: type: QUERY -PREHOOK: Input: default@src -PREHOOK: Output: default@combine2 -POSTHOOK: query: -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.20, 0.20S) --- This test sets mapred.max.split.size=256 and hive.merge.smallfiles.avgsize=0 --- in an attempt to force the generation of multiple splits and multiple output files. --- However, Hadoop 0.20 is incapable of generating splits smaller than the block size --- when using CombineFileInputFormat, so only one split is generated. This has a --- significant impact on the results results of this test. --- This issue was fixed in MAPREDUCE-2046 which is included in 0.22. - -insert overwrite table combine2 partition(value) -select * from ( - select key, value from src where key < 10 - union all - select key, '|' as value from src where key = 11 - union all - select key, '2010-04-21 09:45:00' value from src where key = 19) s -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src -POSTHOOK: Output: default@combine2@value=2010-04-21 09%3A45%3A00 -POSTHOOK: Output: default@combine2@value=val_0 -POSTHOOK: Output: default@combine2@value=val_2 -POSTHOOK: Output: default@combine2@value=val_4 -POSTHOOK: Output: default@combine2@value=val_5 -POSTHOOK: Output: default@combine2@value=val_8 -POSTHOOK: Output: default@combine2@value=val_9 -POSTHOOK: Output: default@combine2@value=| -POSTHOOK: Lineage: combine2 PARTITION(value=2010-04-21 09:45:00).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: combine2 PARTITION(value=val_0).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: combine2 PARTITION(value=val_2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: combine2 PARTITION(value=val_4).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: combine2 PARTITION(value=val_5).key EXPRESSION [(src)src.FieldSchema(name:key, 
type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: combine2 PARTITION(value=val_8).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: combine2 PARTITION(value=val_9).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: combine2 PARTITION(value=|).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ] -PREHOOK: query: show partitions combine2 -PREHOOK: type: SHOWPARTITIONS -PREHOOK: Input: default@combine2 -POSTHOOK: query: show partitions combine2 -POSTHOOK: type: SHOWPARTITIONS -POSTHOOK: Input: default@combine2 -value=2010-04-21 09%3A45%3A00 -value=val_0 -value=val_2 -value=val_4 -value=val_5 -value=val_8 -value=val_9 -value=| -PREHOOK: query: explain -select key, value from combine2 where value is not null -PREHOOK: type: QUERY -POSTHOOK: query: explain -select key, value from combine2 where value is not null -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: combine2 - Statistics: Num rows: 12 Data size: 14 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: key (type: string), value (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 12 Data size: 14 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - Statistics: Num rows: 12 Data size: 14 Basic stats: COMPLETE Column stats: NONE - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - ListSink - -PREHOOK: query: select key, value from combine2 where value is not null -PREHOOK: type: QUERY -PREHOOK: Input: default@combine2 -PREHOOK: Input: default@combine2@value=2010-04-21 09%3A45%3A00 -PREHOOK: Input: default@combine2@value=val_0 -PREHOOK: Input: default@combine2@value=val_2 -PREHOOK: Input: default@combine2@value=val_4 -PREHOOK: Input: default@combine2@value=val_5 -PREHOOK: Input: default@combine2@value=val_8 -PREHOOK: Input: default@combine2@value=val_9 -PREHOOK: Input: default@combine2@value=| -#### A masked pattern was here #### -POSTHOOK: query: select key, value from combine2 where value is not null -POSTHOOK: type: QUERY -POSTHOOK: Input: default@combine2 -POSTHOOK: Input: default@combine2@value=2010-04-21 09%3A45%3A00 -POSTHOOK: Input: default@combine2@value=val_0 -POSTHOOK: Input: default@combine2@value=val_2 -POSTHOOK: Input: default@combine2@value=val_4 -POSTHOOK: Input: default@combine2@value=val_5 -POSTHOOK: Input: default@combine2@value=val_8 -POSTHOOK: Input: default@combine2@value=val_9 -POSTHOOK: Input: default@combine2@value=| -#### A masked pattern was here #### -0 val_0 -0 val_0 -0 val_0 -11 | -19 2010-04-21 09:45:00 -2 val_2 -4 val_4 -5 val_5 -5 val_5 -5 val_5 -8 val_8 -9 val_9 
-PREHOOK: query: explain extended -select count(1) from combine2 where value is not null -PREHOOK: type: QUERY -POSTHOOK: query: explain extended -select count(1) from combine2 where value is not null -POSTHOOK: type: QUERY -ABSTRACT SYNTAX TREE: - -TOK_QUERY - TOK_FROM - TOK_TABREF - TOK_TABNAME - combine2 - TOK_INSERT - TOK_DESTINATION - TOK_DIR - TOK_TMP_FILE - TOK_SELECT - TOK_SELEXPR - TOK_FUNCTION - count - 1 - TOK_WHERE - TOK_FUNCTION - TOK_ISNOTNULL - TOK_TABLE_OR_COL - value - - -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: combine2 - Statistics: Num rows: 12 Data size: 14 Basic stats: COMPLETE Column stats: COMPLETE - GatherStats: false - Select Operator - Statistics: Num rows: 12 Data size: 14 Basic stats: COMPLETE Column stats: COMPLETE - Group By Operator - aggregations: count(1) - mode: hash - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - sort order: - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - tag: -1 - value expressions: _col0 (type: bigint) - auto parallelism: false - Path -> Alias: -#### A masked pattern was here #### - Path -> Partition: -#### A masked pattern was here #### - Partition - base file name: value=2010-04-21 09%3A45%3A00 - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - partition values: - value 2010-04-21 09:45:00 - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key - columns.comments - columns.types string -#### A masked pattern was here #### - name default.combine2 - numFiles 1 - numRows 1 - partition_columns value - partition_columns.types string - rawDataSize 2 - serialization.ddl struct combine2 { string key} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 3 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - bucket_count -1 - columns key - columns.comments - columns.types string -#### A masked pattern was here #### - name default.combine2 - partition_columns value - partition_columns.types string - serialization.ddl struct combine2 { string key} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.combine2 - name: default.combine2 -#### A masked pattern was here #### - Partition - base file name: value=val_0 - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - partition values: - value val_0 - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key - columns.comments - columns.types string -#### A masked pattern was here #### - name default.combine2 - numFiles 1 - numRows 3 - partition_columns value - partition_columns.types string - rawDataSize 3 - serialization.ddl struct combine2 { string key} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 6 -#### A masked pattern was here #### - serde: 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - bucket_count -1 - columns key - columns.comments - columns.types string -#### A masked pattern was here #### - name default.combine2 - partition_columns value - partition_columns.types string - serialization.ddl struct combine2 { string key} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.combine2 - name: default.combine2 -#### A masked pattern was here #### - Partition - base file name: value=val_2 - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - partition values: - value val_2 - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key - columns.comments - columns.types string -#### A masked pattern was here #### - name default.combine2 - numFiles 1 - numRows 1 - partition_columns value - partition_columns.types string - rawDataSize 1 - serialization.ddl struct combine2 { string key} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 2 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - bucket_count -1 - columns key - columns.comments - columns.types string -#### A masked pattern was here #### - name default.combine2 - partition_columns value - partition_columns.types string - serialization.ddl struct combine2 { string key} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.combine2 - name: default.combine2 -#### A masked pattern was here #### - Partition - base file name: value=val_4 - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - partition values: - value val_4 - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key - columns.comments - columns.types string -#### A masked pattern was here #### - name default.combine2 - numFiles 1 - numRows 1 - partition_columns value - partition_columns.types string - rawDataSize 1 - serialization.ddl struct combine2 { string key} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 2 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - bucket_count -1 - columns key - columns.comments - columns.types string -#### A masked pattern was here #### - name default.combine2 - partition_columns value - partition_columns.types string - serialization.ddl struct combine2 { string key} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.combine2 - name: default.combine2 -#### A masked pattern was here 
#### - Partition - base file name: value=val_5 - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - partition values: - value val_5 - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key - columns.comments - columns.types string -#### A masked pattern was here #### - name default.combine2 - numFiles 1 - numRows 3 - partition_columns value - partition_columns.types string - rawDataSize 3 - serialization.ddl struct combine2 { string key} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 6 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - bucket_count -1 - columns key - columns.comments - columns.types string -#### A masked pattern was here #### - name default.combine2 - partition_columns value - partition_columns.types string - serialization.ddl struct combine2 { string key} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.combine2 - name: default.combine2 -#### A masked pattern was here #### - Partition - base file name: value=val_8 - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - partition values: - value val_8 - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key - columns.comments - columns.types string -#### A masked pattern was here #### - name default.combine2 - numFiles 1 - numRows 1 - partition_columns value - partition_columns.types string - rawDataSize 1 - serialization.ddl struct combine2 { string key} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 2 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - bucket_count -1 - columns key - columns.comments - columns.types string -#### A masked pattern was here #### - name default.combine2 - partition_columns value - partition_columns.types string - serialization.ddl struct combine2 { string key} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.combine2 - name: default.combine2 -#### A masked pattern was here #### - Partition - base file name: value=val_9 - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - partition values: - value val_9 - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key - columns.comments - columns.types string -#### A masked pattern was here #### - name default.combine2 - numFiles 1 - numRows 1 - partition_columns value - partition_columns.types string - rawDataSize 1 - serialization.ddl struct combine2 { string key} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 2 -#### A masked pattern was here #### - serde: 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - bucket_count -1 - columns key - columns.comments - columns.types string -#### A masked pattern was here #### - name default.combine2 - partition_columns value - partition_columns.types string - serialization.ddl struct combine2 { string key} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.combine2 - name: default.combine2 -#### A masked pattern was here #### - Partition - base file name: value=| - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - partition values: - value | - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key - columns.comments - columns.types string -#### A masked pattern was here #### - name default.combine2 - numFiles 1 - numRows 1 - partition_columns value - partition_columns.types string - rawDataSize 2 - serialization.ddl struct combine2 { string key} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 3 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - bucket_count -1 - columns key - columns.comments - columns.types string -#### A masked pattern was here #### - name default.combine2 - partition_columns value - partition_columns.types string - serialization.ddl struct combine2 { string key} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.combine2 - name: default.combine2 - Truncated Path -> Alias: - /combine2/value=2010-04-21 09%3A45%3A00 [combine2] - /combine2/value=val_0 [combine2] - /combine2/value=val_2 [combine2] - /combine2/value=val_4 [combine2] - /combine2/value=val_5 [combine2] - /combine2/value=val_8 [combine2] - /combine2/value=val_9 [combine2] - /combine2/value=| [combine2] - Needs Tagging: false - Reduce Operator Tree: - Group By Operator - aggregations: count(VALUE._col0) - mode: mergepartial - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: _col0 (type: bigint) - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - GlobalTableId: 0 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE -#### A masked pattern was here #### - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - columns _col0 - columns.types bigint - escape.delim \ - hive.serialization.extend.additional.nesting.levels true - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - TotalFiles: 1 - GatherStats: false - MultiFileSpray: false - - 
Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - ListSink - -PREHOOK: query: select count(1) from combine2 where value is not null -PREHOOK: type: QUERY -PREHOOK: Input: default@combine2 -PREHOOK: Input: default@combine2@value=2010-04-21 09%3A45%3A00 -PREHOOK: Input: default@combine2@value=val_0 -PREHOOK: Input: default@combine2@value=val_2 -PREHOOK: Input: default@combine2@value=val_4 -PREHOOK: Input: default@combine2@value=val_5 -PREHOOK: Input: default@combine2@value=val_8 -PREHOOK: Input: default@combine2@value=val_9 -PREHOOK: Input: default@combine2@value=| -#### A masked pattern was here #### -POSTHOOK: query: select count(1) from combine2 where value is not null -POSTHOOK: type: QUERY -POSTHOOK: Input: default@combine2 -POSTHOOK: Input: default@combine2@value=2010-04-21 09%3A45%3A00 -POSTHOOK: Input: default@combine2@value=val_0 -POSTHOOK: Input: default@combine2@value=val_2 -POSTHOOK: Input: default@combine2@value=val_4 -POSTHOOK: Input: default@combine2@value=val_5 -POSTHOOK: Input: default@combine2@value=val_8 -POSTHOOK: Input: default@combine2@value=val_9 -POSTHOOK: Input: default@combine2@value=| -#### A masked pattern was here #### -12 -PREHOOK: query: explain -select ds, count(1) from srcpart where ds is not null group by ds -PREHOOK: type: QUERY -POSTHOOK: query: explain -select ds, count(1) from srcpart where ds is not null group by ds -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: srcpart - Statistics: Num rows: 0 Data size: 23248 Basic stats: PARTIAL Column stats: COMPLETE - Select Operator - expressions: ds (type: string) - outputColumnNames: ds - Statistics: Num rows: 0 Data size: 23248 Basic stats: PARTIAL Column stats: COMPLETE - Group By Operator - aggregations: count(1) - keys: ds (type: string) - mode: hash - outputColumnNames: _col0, _col1 - Statistics: Num rows: 0 Data size: 23248 Basic stats: PARTIAL Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 (type: string) - sort order: + - Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 0 Data size: 23248 Basic stats: PARTIAL Column stats: COMPLETE - value expressions: _col1 (type: bigint) - Reduce Operator Tree: - Group By Operator - aggregations: count(VALUE._col0) - keys: KEY._col0 (type: string) - mode: mergepartial - outputColumnNames: _col0, _col1 - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE - Select Operator - expressions: _col0 (type: string), _col1 (type: bigint) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - ListSink - -PREHOOK: query: select ds, count(1) from srcpart where ds is not null group by ds -PREHOOK: type: QUERY -PREHOOK: Input: default@srcpart -PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 -PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 -PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 -PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 -#### A masked pattern was here #### -POSTHOOK: query: 
select ds, count(1) from srcpart where ds is not null group by ds -POSTHOOK: type: QUERY -POSTHOOK: Input: default@srcpart -POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 -POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 -POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 -POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 -#### A masked pattern was here #### -2008-04-08 1000 -2008-04-09 1000 diff --git ql/src/test/results/clientpositive/druid/druid_basic2.q.out ql/src/test/results/clientpositive/druid/druid_basic2.q.out deleted file mode 100644 index 1f7b8eb2f6..0000000000 --- ql/src/test/results/clientpositive/druid/druid_basic2.q.out +++ /dev/null @@ -1,1061 +0,0 @@ -PREHOOK: query: CREATE EXTERNAL TABLE druid_table_1_n2 -STORED BY 'org.apache.hadoop.hive.druid.QTestDruidStorageHandler' -TBLPROPERTIES ("druid.datasource" = "wikipedia") -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@druid_table_1_n2 -POSTHOOK: query: CREATE EXTERNAL TABLE druid_table_1_n2 -STORED BY 'org.apache.hadoop.hive.druid.QTestDruidStorageHandler' -TBLPROPERTIES ("druid.datasource" = "wikipedia") -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@druid_table_1_n2 -PREHOOK: query: DESCRIBE FORMATTED druid_table_1_n2 -PREHOOK: type: DESCTABLE -PREHOOK: Input: default@druid_table_1_n2 -POSTHOOK: query: DESCRIBE FORMATTED druid_table_1_n2 -POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@druid_table_1_n2 -# col_name data_type comment -__time timestamp with local time zone from deserializer -robot string from deserializer -namespace string from deserializer -anonymous string from deserializer -unpatrolled string from deserializer -page string from deserializer -language string from deserializer -newpage string from deserializer -user string from deserializer -count float from deserializer -added float from deserializer -delta float from deserializer -variation float from deserializer -deleted float from deserializer - -# Detailed Table Information -Database: default -#### A masked pattern was here #### -Retention: 0 -#### A masked pattern was here #### -Table Type: EXTERNAL_TABLE -Table Parameters: - COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"__time\":\"true\",\"added\":\"true\",\"anonymous\":\"true\",\"count\":\"true\",\"deleted\":\"true\",\"delta\":\"true\",\"language\":\"true\",\"namespace\":\"true\",\"newpage\":\"true\",\"page\":\"true\",\"robot\":\"true\",\"unpatrolled\":\"true\",\"user\":\"true\",\"variation\":\"true\"}} - EXTERNAL TRUE - bucketing_version 2 - druid.datasource wikipedia - numFiles 0 - numRows 0 - rawDataSize 0 - storage_handler org.apache.hadoop.hive.druid.QTestDruidStorageHandler - totalSize 0 -#### A masked pattern was here #### - -# Storage Information -SerDe Library: org.apache.hadoop.hive.druid.QTestDruidSerDe -InputFormat: null -OutputFormat: null -Compressed: No -Num Buckets: -1 -Bucket Columns: [] -Sort Columns: [] -Storage Desc Params: - serialization.format 1 -PREHOOK: query: EXPLAIN EXTENDED -SELECT robot FROM druid_table_1_n2 -PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN EXTENDED -SELECT robot FROM druid_table_1_n2 -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-0 is a root stage - -STAGE PLANS: - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - TableScan - alias: druid_table_1_n2 - properties: - druid.fieldNames robot - druid.fieldTypes string - druid.query.json 
{"queryType":"scan","dataSource":"wikipedia","intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"columns":["robot"],"resultFormat":"compactedList"} - druid.query.type scan - GatherStats: false - Select Operator - expressions: robot (type: string) - outputColumnNames: _col0 - ListSink - -PREHOOK: query: EXPLAIN EXTENDED -SELECT delta FROM druid_table_1_n2 -PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN EXTENDED -SELECT delta FROM druid_table_1_n2 -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-0 is a root stage - -STAGE PLANS: - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - TableScan - alias: druid_table_1_n2 - properties: - druid.fieldNames delta - druid.fieldTypes float - druid.query.json {"queryType":"scan","dataSource":"wikipedia","intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"columns":["delta"],"resultFormat":"compactedList"} - druid.query.type scan - GatherStats: false - Select Operator - expressions: delta (type: float) - outputColumnNames: _col0 - ListSink - -PREHOOK: query: EXPLAIN EXTENDED -SELECT robot -FROM druid_table_1_n2 -WHERE language = 'en' -PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN EXTENDED -SELECT robot -FROM druid_table_1_n2 -WHERE language = 'en' -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-0 is a root stage - -STAGE PLANS: - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - TableScan - alias: druid_table_1_n2 - properties: - druid.fieldNames robot - druid.fieldTypes string - druid.query.json {"queryType":"scan","dataSource":"wikipedia","intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"filter":{"type":"selector","dimension":"language","value":"en"},"columns":["robot"],"resultFormat":"compactedList"} - druid.query.type scan - GatherStats: false - Select Operator - expressions: robot (type: string) - outputColumnNames: _col0 - ListSink - -PREHOOK: query: EXPLAIN EXTENDED -SELECT DISTINCT robot -FROM druid_table_1_n2 -WHERE language = 'en' -PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN EXTENDED -SELECT DISTINCT robot -FROM druid_table_1_n2 -WHERE language = 'en' -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-0 is a root stage - -STAGE PLANS: - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - TableScan - alias: druid_table_1_n2 - properties: - druid.fieldNames robot - druid.fieldTypes string - druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"all","dimensions":[{"type":"default","dimension":"robot","outputName":"robot","outputType":"STRING"}],"limitSpec":{"type":"default"},"filter":{"type":"selector","dimension":"language","value":"en"},"aggregations":[],"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"]} - druid.query.type groupBy - GatherStats: false - Select Operator - expressions: robot (type: string) - outputColumnNames: _col0 - ListSink - -PREHOOK: query: EXPLAIN EXTENDED -SELECT a.robot, b.language -FROM -( - (SELECT robot, language - FROM druid_table_1_n2) a - JOIN - (SELECT language - FROM druid_table_1_n2) b - ON a.language = b.language -) -PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN EXTENDED -SELECT a.robot, b.language -FROM -( - (SELECT robot, language - FROM druid_table_1_n2) a - JOIN - (SELECT language - FROM druid_table_1_n2) b - ON a.language = b.language -) -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-1 - Tez -#### A masked pattern was here #### - Edges: - Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 3 
(SIMPLE_EDGE) -#### A masked pattern was here #### - Vertices: - Map 1 - Map Operator Tree: - TableScan - alias: druid_table_1_n2 - properties: - druid.fieldNames robot,language - druid.fieldTypes string,string - druid.query.json {"queryType":"scan","dataSource":"wikipedia","intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"filter":{"type":"not","field":{"type":"selector","dimension":"language","value":null}},"columns":["robot","language"],"resultFormat":"compactedList"} - druid.query.type scan - Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE - GatherStats: false - Select Operator - expressions: robot (type: string), language (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: _col1 (type: string) - null sort order: a - sort order: + - Map-reduce partition columns: _col1 (type: string) - Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE - tag: 0 - value expressions: _col0 (type: string) - auto parallelism: true - Execution mode: vectorized, llap - LLAP IO: no inputs - Path -> Alias: - hdfs://### HDFS PATH ### [druid_table_1_n2] - Path -> Partition: - hdfs://### HDFS PATH ### - Partition - base file name: druid_table_1_n2 - input format: org.apache.hadoop.hive.druid.io.DruidQueryBasedInputFormat - output format: org.apache.hadoop.hive.druid.io.DruidOutputFormat - properties: - COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"__time":"true","added":"true","anonymous":"true","count":"true","deleted":"true","delta":"true","language":"true","namespace":"true","newpage":"true","page":"true","robot":"true","unpatrolled":"true","user":"true","variation":"true"}} - EXTERNAL TRUE - bucket_count -1 - bucketing_version 2 - column.name.delimiter , - columns __time,robot,namespace,anonymous,unpatrolled,page,language,newpage,user,count,added,delta,variation,deleted - columns.comments 'from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer' - columns.types timestamp with local time zone:string:string:string:string:string:string:string:string:float:float:float:float:float - druid.datasource wikipedia - druid.fieldNames robot,language - druid.fieldTypes string,string - druid.query.json {"queryType":"scan","dataSource":"wikipedia","intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"filter":{"type":"not","field":{"type":"selector","dimension":"language","value":null}},"columns":["robot","language"],"resultFormat":"compactedList"} - druid.query.type scan -#### A masked pattern was here #### - location hdfs://### HDFS PATH ### - name default.druid_table_1_n2 - numFiles 0 - numRows 0 - rawDataSize 0 - serialization.ddl struct druid_table_1_n2 { timestamp with local time zone __time, string robot, string namespace, string anonymous, string unpatrolled, string page, string language, string newpage, string user, float count, float added, float delta, float variation, float deleted} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.druid.QTestDruidSerDe - storage_handler org.apache.hadoop.hive.druid.QTestDruidStorageHandler - totalSize 0 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.druid.QTestDruidSerDe - - input format: 
org.apache.hadoop.hive.druid.io.DruidQueryBasedInputFormat - output format: org.apache.hadoop.hive.druid.io.DruidOutputFormat - properties: - COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"__time":"true","added":"true","anonymous":"true","count":"true","deleted":"true","delta":"true","language":"true","namespace":"true","newpage":"true","page":"true","robot":"true","unpatrolled":"true","user":"true","variation":"true"}} - EXTERNAL TRUE - bucket_count -1 - bucketing_version 2 - column.name.delimiter , - columns __time,robot,namespace,anonymous,unpatrolled,page,language,newpage,user,count,added,delta,variation,deleted - columns.comments 'from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer' - columns.types timestamp with local time zone:string:string:string:string:string:string:string:string:float:float:float:float:float - druid.datasource wikipedia - druid.fieldNames robot,language - druid.fieldTypes string,string - druid.query.json {"queryType":"scan","dataSource":"wikipedia","intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"filter":{"type":"not","field":{"type":"selector","dimension":"language","value":null}},"columns":["robot","language"],"resultFormat":"compactedList"} - druid.query.type scan -#### A masked pattern was here #### - location hdfs://### HDFS PATH ### - name default.druid_table_1_n2 - numFiles 0 - numRows 0 - rawDataSize 0 - serialization.ddl struct druid_table_1_n2 { timestamp with local time zone __time, string robot, string namespace, string anonymous, string unpatrolled, string page, string language, string newpage, string user, float count, float added, float delta, float variation, float deleted} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.druid.QTestDruidSerDe - storage_handler org.apache.hadoop.hive.druid.QTestDruidStorageHandler - totalSize 0 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.druid.QTestDruidSerDe - name: default.druid_table_1_n2 - name: default.druid_table_1_n2 - Truncated Path -> Alias: - /druid_table_1_n2 [druid_table_1_n2] - Map 3 - Map Operator Tree: - TableScan - alias: druid_table_1_n2 - properties: - druid.fieldNames language - druid.fieldTypes string - druid.query.json {"queryType":"scan","dataSource":"wikipedia","intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"filter":{"type":"not","field":{"type":"selector","dimension":"language","value":null}},"columns":["language"],"resultFormat":"compactedList"} - druid.query.type scan - Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: NONE - GatherStats: false - Reduce Output Operator - key expressions: language (type: string) - null sort order: a - sort order: + - Map-reduce partition columns: language (type: string) - Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: NONE - tag: 1 - auto parallelism: true - Execution mode: vectorized, llap - LLAP IO: no inputs - Path -> Alias: - hdfs://### HDFS PATH ### [druid_table_1_n2] - Path -> Partition: - hdfs://### HDFS PATH ### - Partition - base file name: druid_table_1_n2 - input format: org.apache.hadoop.hive.druid.io.DruidQueryBasedInputFormat - output format: org.apache.hadoop.hive.druid.io.DruidOutputFormat - properties: - COLUMN_STATS_ACCURATE 
{"BASIC_STATS":"true","COLUMN_STATS":{"__time":"true","added":"true","anonymous":"true","count":"true","deleted":"true","delta":"true","language":"true","namespace":"true","newpage":"true","page":"true","robot":"true","unpatrolled":"true","user":"true","variation":"true"}} - EXTERNAL TRUE - bucket_count -1 - bucketing_version 2 - column.name.delimiter , - columns __time,robot,namespace,anonymous,unpatrolled,page,language,newpage,user,count,added,delta,variation,deleted - columns.comments 'from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer' - columns.types timestamp with local time zone:string:string:string:string:string:string:string:string:float:float:float:float:float - druid.datasource wikipedia - druid.fieldNames language - druid.fieldTypes string - druid.query.json {"queryType":"scan","dataSource":"wikipedia","intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"filter":{"type":"not","field":{"type":"selector","dimension":"language","value":null}},"columns":["language"],"resultFormat":"compactedList"} - druid.query.type scan -#### A masked pattern was here #### - location hdfs://### HDFS PATH ### - name default.druid_table_1_n2 - numFiles 0 - numRows 0 - rawDataSize 0 - serialization.ddl struct druid_table_1_n2 { timestamp with local time zone __time, string robot, string namespace, string anonymous, string unpatrolled, string page, string language, string newpage, string user, float count, float added, float delta, float variation, float deleted} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.druid.QTestDruidSerDe - storage_handler org.apache.hadoop.hive.druid.QTestDruidStorageHandler - totalSize 0 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.druid.QTestDruidSerDe - - input format: org.apache.hadoop.hive.druid.io.DruidQueryBasedInputFormat - output format: org.apache.hadoop.hive.druid.io.DruidOutputFormat - properties: - COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"__time":"true","added":"true","anonymous":"true","count":"true","deleted":"true","delta":"true","language":"true","namespace":"true","newpage":"true","page":"true","robot":"true","unpatrolled":"true","user":"true","variation":"true"}} - EXTERNAL TRUE - bucket_count -1 - bucketing_version 2 - column.name.delimiter , - columns __time,robot,namespace,anonymous,unpatrolled,page,language,newpage,user,count,added,delta,variation,deleted - columns.comments 'from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer' - columns.types timestamp with local time zone:string:string:string:string:string:string:string:string:float:float:float:float:float - druid.datasource wikipedia - druid.fieldNames language - druid.fieldTypes string - druid.query.json {"queryType":"scan","dataSource":"wikipedia","intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"filter":{"type":"not","field":{"type":"selector","dimension":"language","value":null}},"columns":["language"],"resultFormat":"compactedList"} - druid.query.type scan -#### A masked pattern was here #### - location hdfs://### HDFS PATH ### - name 
default.druid_table_1_n2 - numFiles 0 - numRows 0 - rawDataSize 0 - serialization.ddl struct druid_table_1_n2 { timestamp with local time zone __time, string robot, string namespace, string anonymous, string unpatrolled, string page, string language, string newpage, string user, float count, float added, float delta, float variation, float deleted} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.druid.QTestDruidSerDe - storage_handler org.apache.hadoop.hive.druid.QTestDruidStorageHandler - totalSize 0 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.druid.QTestDruidSerDe - name: default.druid_table_1_n2 - name: default.druid_table_1_n2 - Truncated Path -> Alias: - /druid_table_1_n2 [druid_table_1_n2] - Reducer 2 - Execution mode: llap - Needs Tagging: false - Reduce Operator Tree: - Merge Join Operator - condition map: - Inner Join 0 to 1 - keys: - 0 _col1 (type: string) - 1 language (type: string) - outputColumnNames: _col0, _col2 - Position of Big Table: 0 - Statistics: Num rows: 1 Data size: 404 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: _col0 (type: string), _col2 (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 1 Data size: 404 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - GlobalTableId: 0 - directory: hdfs://### HDFS PATH ### - NumFilesPerFileSink: 1 - Statistics: Num rows: 1 Data size: 404 Basic stats: COMPLETE Column stats: NONE - Stats Publishing Key Prefix: hdfs://### HDFS PATH ### - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - properties: - columns _col0,_col1 - columns.types string:string - escape.delim \ - hive.serialization.extend.additional.nesting.levels true - serialization.escape.crlf true - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - TotalFiles: 1 - GatherStats: false - MultiFileSpray: false - - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - ListSink - -Warning: Shuffle Join MERGEJOIN[8][tables = [$hdt$_0, druid_table_1_n2]] in Stage 'Reducer 2' is a cross product -PREHOOK: query: EXPLAIN EXTENDED -SELECT a.robot, b.language -FROM -( - (SELECT robot, language - FROM druid_table_1_n2 - WHERE language = 'en') a - JOIN - (SELECT language - FROM druid_table_1_n2) b - ON a.language = b.language -) -PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN EXTENDED -SELECT a.robot, b.language -FROM -( - (SELECT robot, language - FROM druid_table_1_n2 - WHERE language = 'en') a - JOIN - (SELECT language - FROM druid_table_1_n2) b - ON a.language = b.language -) -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-1 - Tez -#### A masked pattern was here #### - Edges: - Reducer 2 <- Map 1 (XPROD_EDGE), Map 3 (XPROD_EDGE) -#### A masked pattern was here #### - Vertices: - Map 1 - Map Operator Tree: - TableScan - alias: druid_table_1_n2 - properties: - druid.fieldNames robot - druid.fieldTypes string - druid.query.json {"queryType":"scan","dataSource":"wikipedia","intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"filter":{"type":"selector","dimension":"language","value":"en"},"columns":["robot"],"resultFormat":"compactedList"} - druid.query.type scan - Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: NONE - 
GatherStats: false - Select Operator - expressions: robot (type: string) - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - null sort order: - sort order: - Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: NONE - tag: 0 - value expressions: _col0 (type: string) - auto parallelism: false - Execution mode: vectorized, llap - LLAP IO: no inputs - Path -> Alias: - hdfs://### HDFS PATH ### [druid_table_1_n2] - Path -> Partition: - hdfs://### HDFS PATH ### - Partition - base file name: druid_table_1_n2 - input format: org.apache.hadoop.hive.druid.io.DruidQueryBasedInputFormat - output format: org.apache.hadoop.hive.druid.io.DruidOutputFormat - properties: - COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"__time":"true","added":"true","anonymous":"true","count":"true","deleted":"true","delta":"true","language":"true","namespace":"true","newpage":"true","page":"true","robot":"true","unpatrolled":"true","user":"true","variation":"true"}} - EXTERNAL TRUE - bucket_count -1 - bucketing_version 2 - column.name.delimiter , - columns __time,robot,namespace,anonymous,unpatrolled,page,language,newpage,user,count,added,delta,variation,deleted - columns.comments 'from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer' - columns.types timestamp with local time zone:string:string:string:string:string:string:string:string:float:float:float:float:float - druid.datasource wikipedia - druid.fieldNames robot - druid.fieldTypes string - druid.query.json {"queryType":"scan","dataSource":"wikipedia","intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"filter":{"type":"selector","dimension":"language","value":"en"},"columns":["robot"],"resultFormat":"compactedList"} - druid.query.type scan -#### A masked pattern was here #### - location hdfs://### HDFS PATH ### - name default.druid_table_1_n2 - numFiles 0 - numRows 0 - rawDataSize 0 - serialization.ddl struct druid_table_1_n2 { timestamp with local time zone __time, string robot, string namespace, string anonymous, string unpatrolled, string page, string language, string newpage, string user, float count, float added, float delta, float variation, float deleted} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.druid.QTestDruidSerDe - storage_handler org.apache.hadoop.hive.druid.QTestDruidStorageHandler - totalSize 0 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.druid.QTestDruidSerDe - - input format: org.apache.hadoop.hive.druid.io.DruidQueryBasedInputFormat - output format: org.apache.hadoop.hive.druid.io.DruidOutputFormat - properties: - COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"__time":"true","added":"true","anonymous":"true","count":"true","deleted":"true","delta":"true","language":"true","namespace":"true","newpage":"true","page":"true","robot":"true","unpatrolled":"true","user":"true","variation":"true"}} - EXTERNAL TRUE - bucket_count -1 - bucketing_version 2 - column.name.delimiter , - columns __time,robot,namespace,anonymous,unpatrolled,page,language,newpage,user,count,added,delta,variation,deleted - columns.comments 'from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from 
deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer' - columns.types timestamp with local time zone:string:string:string:string:string:string:string:string:float:float:float:float:float - druid.datasource wikipedia - druid.fieldNames robot - druid.fieldTypes string - druid.query.json {"queryType":"scan","dataSource":"wikipedia","intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"filter":{"type":"selector","dimension":"language","value":"en"},"columns":["robot"],"resultFormat":"compactedList"} - druid.query.type scan -#### A masked pattern was here #### - location hdfs://### HDFS PATH ### - name default.druid_table_1_n2 - numFiles 0 - numRows 0 - rawDataSize 0 - serialization.ddl struct druid_table_1_n2 { timestamp with local time zone __time, string robot, string namespace, string anonymous, string unpatrolled, string page, string language, string newpage, string user, float count, float added, float delta, float variation, float deleted} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.druid.QTestDruidSerDe - storage_handler org.apache.hadoop.hive.druid.QTestDruidStorageHandler - totalSize 0 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.druid.QTestDruidSerDe - name: default.druid_table_1_n2 - name: default.druid_table_1_n2 - Truncated Path -> Alias: - /druid_table_1_n2 [druid_table_1_n2] - Map 3 - Map Operator Tree: - TableScan - alias: druid_table_1_n2 - properties: - druid.fieldNames vc - druid.fieldTypes string - druid.query.json {"queryType":"scan","dataSource":"wikipedia","intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"filter":{"type":"selector","dimension":"language","value":"en"},"virtualColumns":[{"type":"expression","name":"vc","expression":"'en'","outputType":"STRING"}],"columns":["vc"],"resultFormat":"compactedList"} - druid.query.type scan - Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE - GatherStats: false - Reduce Output Operator - null sort order: - sort order: - Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE - tag: 1 - auto parallelism: false - Execution mode: vectorized, llap - LLAP IO: no inputs - Path -> Alias: - hdfs://### HDFS PATH ### [druid_table_1_n2] - Path -> Partition: - hdfs://### HDFS PATH ### - Partition - base file name: druid_table_1_n2 - input format: org.apache.hadoop.hive.druid.io.DruidQueryBasedInputFormat - output format: org.apache.hadoop.hive.druid.io.DruidOutputFormat - properties: - COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"__time":"true","added":"true","anonymous":"true","count":"true","deleted":"true","delta":"true","language":"true","namespace":"true","newpage":"true","page":"true","robot":"true","unpatrolled":"true","user":"true","variation":"true"}} - EXTERNAL TRUE - bucket_count -1 - bucketing_version 2 - column.name.delimiter , - columns __time,robot,namespace,anonymous,unpatrolled,page,language,newpage,user,count,added,delta,variation,deleted - columns.comments 'from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer' - columns.types timestamp with local time zone:string:string:string:string:string:string:string:string:float:float:float:float:float - 
druid.datasource wikipedia - druid.fieldNames vc - druid.fieldTypes string - druid.query.json {"queryType":"scan","dataSource":"wikipedia","intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"filter":{"type":"selector","dimension":"language","value":"en"},"virtualColumns":[{"type":"expression","name":"vc","expression":"'en'","outputType":"STRING"}],"columns":["vc"],"resultFormat":"compactedList"} - druid.query.type scan -#### A masked pattern was here #### - location hdfs://### HDFS PATH ### - name default.druid_table_1_n2 - numFiles 0 - numRows 0 - rawDataSize 0 - serialization.ddl struct druid_table_1_n2 { timestamp with local time zone __time, string robot, string namespace, string anonymous, string unpatrolled, string page, string language, string newpage, string user, float count, float added, float delta, float variation, float deleted} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.druid.QTestDruidSerDe - storage_handler org.apache.hadoop.hive.druid.QTestDruidStorageHandler - totalSize 0 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.druid.QTestDruidSerDe - - input format: org.apache.hadoop.hive.druid.io.DruidQueryBasedInputFormat - output format: org.apache.hadoop.hive.druid.io.DruidOutputFormat - properties: - COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"__time":"true","added":"true","anonymous":"true","count":"true","deleted":"true","delta":"true","language":"true","namespace":"true","newpage":"true","page":"true","robot":"true","unpatrolled":"true","user":"true","variation":"true"}} - EXTERNAL TRUE - bucket_count -1 - bucketing_version 2 - column.name.delimiter , - columns __time,robot,namespace,anonymous,unpatrolled,page,language,newpage,user,count,added,delta,variation,deleted - columns.comments 'from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer' - columns.types timestamp with local time zone:string:string:string:string:string:string:string:string:float:float:float:float:float - druid.datasource wikipedia - druid.fieldNames vc - druid.fieldTypes string - druid.query.json {"queryType":"scan","dataSource":"wikipedia","intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"filter":{"type":"selector","dimension":"language","value":"en"},"virtualColumns":[{"type":"expression","name":"vc","expression":"'en'","outputType":"STRING"}],"columns":["vc"],"resultFormat":"compactedList"} - druid.query.type scan -#### A masked pattern was here #### - location hdfs://### HDFS PATH ### - name default.druid_table_1_n2 - numFiles 0 - numRows 0 - rawDataSize 0 - serialization.ddl struct druid_table_1_n2 { timestamp with local time zone __time, string robot, string namespace, string anonymous, string unpatrolled, string page, string language, string newpage, string user, float count, float added, float delta, float variation, float deleted} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.druid.QTestDruidSerDe - storage_handler org.apache.hadoop.hive.druid.QTestDruidStorageHandler - totalSize 0 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.druid.QTestDruidSerDe - name: default.druid_table_1_n2 - name: default.druid_table_1_n2 - Truncated Path -> Alias: - /druid_table_1_n2 [druid_table_1_n2] - Reducer 2 - Execution mode: llap - Needs Tagging: false - 
Reduce Operator Tree: - Merge Join Operator - condition map: - Inner Join 0 to 1 - keys: - 0 - 1 - outputColumnNames: _col0 - Position of Big Table: 0 - Statistics: Num rows: 1 Data size: 185 Basic stats: PARTIAL Column stats: NONE - Select Operator - expressions: _col0 (type: string), 'en' (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 1 Data size: 185 Basic stats: PARTIAL Column stats: NONE - File Output Operator - compressed: false - GlobalTableId: 0 - directory: hdfs://### HDFS PATH ### - NumFilesPerFileSink: 1 - Statistics: Num rows: 1 Data size: 185 Basic stats: PARTIAL Column stats: NONE - Stats Publishing Key Prefix: hdfs://### HDFS PATH ### - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - properties: - columns _col0,_col1 - columns.types string:string - escape.delim \ - hive.serialization.extend.additional.nesting.levels true - serialization.escape.crlf true - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - TotalFiles: 1 - GatherStats: false - MultiFileSpray: false - - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - ListSink - -PREHOOK: query: EXPLAIN EXTENDED -SELECT robot, floor_day(`__time`), max(added) as m, sum(delta) as s -FROM druid_table_1_n2 -GROUP BY robot, language, floor_day(`__time`) -ORDER BY CAST(robot AS INTEGER) ASC, m DESC -LIMIT 10 -PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN EXTENDED -SELECT robot, floor_day(`__time`), max(added) as m, sum(delta) as s -FROM druid_table_1_n2 -GROUP BY robot, language, floor_day(`__time`) -ORDER BY CAST(robot AS INTEGER) ASC, m DESC -LIMIT 10 -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-0 is a root stage - -STAGE PLANS: - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - TableScan - alias: druid_table_1_n2 - properties: - druid.fieldNames robot,floor_day,$f3,$f4,(tok_function tok_int (tok_table_or_col robot)) - druid.fieldTypes string,timestamp with local time zone,float,double,int - druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"all","dimensions":[{"type":"default","dimension":"robot","outputName":"robot","outputType":"STRING"},{"type":"default","dimension":"language","outputName":"language","outputType":"STRING"},{"type":"extraction","dimension":"__time","outputName":"floor_day","extractionFn":{"type":"timeFormat","format":"yyyy-MM-dd'T'HH:mm:ss.SSS'Z'","granularity":{"type":"period","period":"P1D","timeZone":"US/Pacific"},"timeZone":"UTC","locale":"und"}}],"limitSpec":{"type":"default","limit":10,"columns":[{"dimension":"(tok_function tok_int (tok_table_or_col robot))","direction":"ascending","dimensionOrder":"numeric"},{"dimension":"$f3","direction":"descending","dimensionOrder":"numeric"}]},"aggregations":[{"type":"doubleMax","name":"$f3","fieldName":"added"},{"type":"doubleSum","name":"$f4","fieldName":"delta"}],"postAggregations":[{"type":"expression","name":"(tok_function tok_int (tok_table_or_col robot))","expression":"CAST(\"robot\", 'LONG')"}],"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"]} - druid.query.type groupBy - GatherStats: false - Select Operator - expressions: robot (type: string), floor_day (type: timestamp with local time zone), $f3 (type: float), $f4 (type: double) - outputColumnNames: _col0, _col1, _col2, _col3 - ListSink - -PREHOOK: query: EXPLAIN -SELECT substring(namespace, 
CAST(deleted AS INT), 4) -FROM druid_table_1_n2 -PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN -SELECT substring(namespace, CAST(deleted AS INT), 4) -FROM druid_table_1_n2 -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-0 is a root stage - -STAGE PLANS: - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - TableScan - alias: druid_table_1_n2 - properties: - druid.fieldNames vc - druid.fieldTypes string - druid.query.json {"queryType":"scan","dataSource":"wikipedia","intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"virtualColumns":[{"type":"expression","name":"vc","expression":"substring(\"namespace\", (CAST(\"deleted\", 'LONG') - 1), 4)","outputType":"STRING"}],"columns":["vc"],"resultFormat":"compactedList"} - druid.query.type scan - Select Operator - expressions: vc (type: string) - outputColumnNames: _col0 - ListSink - -PREHOOK: query: EXPLAIN -SELECT robot, floor_day(`__time`) -FROM druid_table_1_n2 -WHERE floor_day(`__time`) BETWEEN '1999-11-01 00:00:00' AND '1999-11-10 00:00:00' -GROUP BY robot, floor_day(`__time`) -ORDER BY robot -LIMIT 10 -PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN -SELECT robot, floor_day(`__time`) -FROM druid_table_1_n2 -WHERE floor_day(`__time`) BETWEEN '1999-11-01 00:00:00' AND '1999-11-10 00:00:00' -GROUP BY robot, floor_day(`__time`) -ORDER BY robot -LIMIT 10 -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-0 is a root stage - -STAGE PLANS: - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - TableScan - alias: druid_table_1_n2 - properties: - druid.fieldNames robot,floor_day - druid.fieldTypes string,timestamp with local time zone - druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"all","dimensions":[{"type":"default","dimension":"robot","outputName":"robot","outputType":"STRING"},{"type":"extraction","dimension":"__time","outputName":"floor_day","extractionFn":{"type":"timeFormat","format":"yyyy-MM-dd'T'HH:mm:ss.SSS'Z'","granularity":{"type":"period","period":"P1D","timeZone":"US/Pacific"},"timeZone":"UTC","locale":"und"}}],"limitSpec":{"type":"default","limit":10,"columns":[{"dimension":"robot","direction":"ascending","dimensionOrder":"lexicographic"}]},"aggregations":[],"intervals":["1999-11-01T08:00:00.000Z/1999-11-10T08:00:00.001Z"]} - druid.query.type groupBy - Select Operator - expressions: robot (type: string), floor_day (type: timestamp with local time zone) - outputColumnNames: _col0, _col1 - ListSink - -PREHOOK: query: EXPLAIN -SELECT robot, `__time` -FROM druid_table_1_n2 -WHERE floor_day(`__time`) BETWEEN '1999-11-01 00:00:00' AND '1999-11-10 00:00:00' -GROUP BY robot, `__time` -ORDER BY robot -LIMIT 10 -PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN -SELECT robot, `__time` -FROM druid_table_1_n2 -WHERE floor_day(`__time`) BETWEEN '1999-11-01 00:00:00' AND '1999-11-10 00:00:00' -GROUP BY robot, `__time` -ORDER BY robot -LIMIT 10 -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-0 is a root stage - -STAGE PLANS: - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - TableScan - alias: druid_table_1_n2 - properties: - druid.fieldNames extract,robot - druid.fieldTypes timestamp with local time zone,string - druid.query.json 
{"queryType":"groupBy","dataSource":"wikipedia","granularity":"all","dimensions":[{"type":"extraction","dimension":"__time","outputName":"extract","extractionFn":{"type":"timeFormat","format":"yyyy-MM-dd'T'HH:mm:ss.SSS'Z'","timeZone":"UTC"}},{"type":"default","dimension":"robot","outputName":"robot","outputType":"STRING"}],"limitSpec":{"type":"default","limit":10,"columns":[{"dimension":"robot","direction":"ascending","dimensionOrder":"lexicographic"}]},"aggregations":[],"intervals":["1999-11-01T08:00:00.000Z/1999-11-10T08:00:00.001Z"]} - druid.query.type groupBy - Select Operator - expressions: robot (type: string), extract (type: timestamp with local time zone) - outputColumnNames: _col0, _col1 - ListSink - -PREHOOK: query: EXPLAIN -SELECT robot, floor_day(`__time`) -FROM druid_table_1_n2 -WHERE `__time` BETWEEN '1999-11-01 00:00:00' AND '1999-11-10 00:00:00' -GROUP BY robot, floor_day(`__time`) -ORDER BY robot -LIMIT 10 -PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN -SELECT robot, floor_day(`__time`) -FROM druid_table_1_n2 -WHERE `__time` BETWEEN '1999-11-01 00:00:00' AND '1999-11-10 00:00:00' -GROUP BY robot, floor_day(`__time`) -ORDER BY robot -LIMIT 10 -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-0 is a root stage - -STAGE PLANS: - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - TableScan - alias: druid_table_1_n2 - properties: - druid.fieldNames robot,floor_day - druid.fieldTypes string,timestamp with local time zone - druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"all","dimensions":[{"type":"default","dimension":"robot","outputName":"robot","outputType":"STRING"},{"type":"extraction","dimension":"__time","outputName":"floor_day","extractionFn":{"type":"timeFormat","format":"yyyy-MM-dd'T'HH:mm:ss.SSS'Z'","granularity":{"type":"period","period":"P1D","timeZone":"US/Pacific"},"timeZone":"UTC","locale":"und"}}],"limitSpec":{"type":"default","limit":10,"columns":[{"dimension":"robot","direction":"ascending","dimensionOrder":"lexicographic"}]},"aggregations":[],"intervals":["1999-11-01T08:00:00.000Z/1999-11-10T08:00:00.001Z"]} - druid.query.type groupBy - Select Operator - expressions: robot (type: string), floor_day (type: timestamp with local time zone) - outputColumnNames: _col0, _col1 - ListSink - -PREHOOK: query: EXPLAIN EXTENDED -SELECT robot, floor_day(`__time`), max(added) as m, sum(delta) as s -FROM druid_table_1_n2 -GROUP BY robot, language, floor_day(`__time`) -ORDER BY CAST(robot AS INTEGER) ASC, m DESC -LIMIT 10 -PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN EXTENDED -SELECT robot, floor_day(`__time`), max(added) as m, sum(delta) as s -FROM druid_table_1_n2 -GROUP BY robot, language, floor_day(`__time`) -ORDER BY CAST(robot AS INTEGER) ASC, m DESC -LIMIT 10 -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-1 - Tez -#### A masked pattern was here #### - Edges: - Reducer 2 <- Map 1 (SIMPLE_EDGE) - Reducer 3 <- Reducer 2 (SIMPLE_EDGE) -#### A masked pattern was here #### - Vertices: - Map 1 - Map Operator Tree: - TableScan - alias: druid_table_1_n2 - Statistics: Num rows: 1 Data size: 416 Basic stats: COMPLETE Column stats: NONE - GatherStats: false - Select Operator - expressions: __time (type: timestamp with local time zone), robot (type: string), language (type: string), added (type: float), delta (type: float) - outputColumnNames: __time, robot, language, added, delta - Statistics: Num rows: 1 Data size: 416 Basic stats: COMPLETE 
Column stats: NONE - Group By Operator - aggregations: max(added), sum(delta) - keys: robot (type: string), language (type: string), floor_day(__time) (type: timestamp with local time zone) - mode: hash - outputColumnNames: _col0, _col1, _col2, _col3, _col4 - Statistics: Num rows: 1 Data size: 416 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: timestamp with local time zone) - null sort order: aaa - sort order: +++ - Map-reduce partition columns: _col0 (type: string), _col1 (type: string), _col2 (type: timestamp with local time zone) - Statistics: Num rows: 1 Data size: 416 Basic stats: COMPLETE Column stats: NONE - tag: -1 - value expressions: _col3 (type: float), _col4 (type: double) - auto parallelism: true - Execution mode: llap - LLAP IO: no inputs - Path -> Alias: - hdfs://### HDFS PATH ### [druid_table_1_n2] - Path -> Partition: - hdfs://### HDFS PATH ### - Partition - base file name: druid_table_1_n2 - input format: org.apache.hadoop.hive.druid.io.DruidQueryBasedInputFormat - output format: org.apache.hadoop.hive.druid.io.DruidOutputFormat - properties: - COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"__time":"true","added":"true","anonymous":"true","count":"true","deleted":"true","delta":"true","language":"true","namespace":"true","newpage":"true","page":"true","robot":"true","unpatrolled":"true","user":"true","variation":"true"}} - EXTERNAL TRUE - bucket_count -1 - bucketing_version 2 - column.name.delimiter , - columns __time,robot,namespace,anonymous,unpatrolled,page,language,newpage,user,count,added,delta,variation,deleted - columns.comments 'from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer' - columns.types timestamp with local time zone:string:string:string:string:string:string:string:string:float:float:float:float:float - druid.datasource wikipedia -#### A masked pattern was here #### - location hdfs://### HDFS PATH ### - name default.druid_table_1_n2 - numFiles 0 - numRows 0 - rawDataSize 0 - serialization.ddl struct druid_table_1_n2 { timestamp with local time zone __time, string robot, string namespace, string anonymous, string unpatrolled, string page, string language, string newpage, string user, float count, float added, float delta, float variation, float deleted} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.druid.QTestDruidSerDe - storage_handler org.apache.hadoop.hive.druid.QTestDruidStorageHandler - totalSize 0 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.druid.QTestDruidSerDe - - input format: org.apache.hadoop.hive.druid.io.DruidQueryBasedInputFormat - output format: org.apache.hadoop.hive.druid.io.DruidOutputFormat - properties: - COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"__time":"true","added":"true","anonymous":"true","count":"true","deleted":"true","delta":"true","language":"true","namespace":"true","newpage":"true","page":"true","robot":"true","unpatrolled":"true","user":"true","variation":"true"}} - EXTERNAL TRUE - bucket_count -1 - bucketing_version 2 - column.name.delimiter , - columns __time,robot,namespace,anonymous,unpatrolled,page,language,newpage,user,count,added,delta,variation,deleted - columns.comments 'from deserializer','from deserializer','from 
deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer' - columns.types timestamp with local time zone:string:string:string:string:string:string:string:string:float:float:float:float:float - druid.datasource wikipedia -#### A masked pattern was here #### - location hdfs://### HDFS PATH ### - name default.druid_table_1_n2 - numFiles 0 - numRows 0 - rawDataSize 0 - serialization.ddl struct druid_table_1_n2 { timestamp with local time zone __time, string robot, string namespace, string anonymous, string unpatrolled, string page, string language, string newpage, string user, float count, float added, float delta, float variation, float deleted} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.druid.QTestDruidSerDe - storage_handler org.apache.hadoop.hive.druid.QTestDruidStorageHandler - totalSize 0 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.druid.QTestDruidSerDe - name: default.druid_table_1_n2 - name: default.druid_table_1_n2 - Truncated Path -> Alias: - /druid_table_1_n2 [druid_table_1_n2] - Reducer 2 - Execution mode: llap - Needs Tagging: false - Reduce Operator Tree: - Group By Operator - aggregations: max(VALUE._col0), sum(VALUE._col1) - keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: timestamp with local time zone) - mode: mergepartial - outputColumnNames: _col0, _col1, _col2, _col3, _col4 - Statistics: Num rows: 1 Data size: 416 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: _col0 (type: string), _col2 (type: timestamp with local time zone), _col3 (type: float), _col4 (type: double) - outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 1 Data size: 416 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: UDFToInteger(_col0) (type: int), _col2 (type: float) - null sort order: az - sort order: +- - Statistics: Num rows: 1 Data size: 416 Basic stats: COMPLETE Column stats: NONE - tag: -1 - TopN: 10 - TopN Hash Memory Usage: 0.1 - value expressions: _col0 (type: string), _col1 (type: timestamp with local time zone), _col3 (type: double) - auto parallelism: false - Reducer 3 - Execution mode: llap - Needs Tagging: false - Reduce Operator Tree: - Select Operator - expressions: VALUE._col0 (type: string), VALUE._col1 (type: timestamp with local time zone), KEY.reducesinkkey1 (type: float), VALUE._col2 (type: double) - outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 1 Data size: 416 Basic stats: COMPLETE Column stats: NONE - Limit - Number of rows: 10 - Statistics: Num rows: 1 Data size: 416 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - GlobalTableId: 0 - directory: hdfs://### HDFS PATH ### - NumFilesPerFileSink: 1 - Statistics: Num rows: 1 Data size: 416 Basic stats: COMPLETE Column stats: NONE - Stats Publishing Key Prefix: hdfs://### HDFS PATH ### - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - properties: - columns _col0,_col1,_col2,_col3 - columns.types string:timestamp with local time zone:float:double - escape.delim \ - hive.serialization.extend.additional.nesting.levels true - serialization.escape.crlf true - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - 
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - TotalFiles: 1 - GatherStats: false - MultiFileSpray: false - - Stage: Stage-0 - Fetch Operator - limit: 10 - Processor Tree: - ListSink - diff --git ql/src/test/results/clientpositive/druid/druid_timestamptz.q.out ql/src/test/results/clientpositive/druid/druid_timestamptz.q.out deleted file mode 100644 index 7338559137..0000000000 --- ql/src/test/results/clientpositive/druid/druid_timestamptz.q.out +++ /dev/null @@ -1,862 +0,0 @@ -PREHOOK: query: drop table tstz1_n0 -PREHOOK: type: DROPTABLE -POSTHOOK: query: drop table tstz1_n0 -POSTHOOK: type: DROPTABLE -PREHOOK: query: create external table tstz1_n0(`__time` timestamp with local time zone, n string, v integer) -STORED BY 'org.apache.hadoop.hive.druid.DruidStorageHandler' -TBLPROPERTIES ("druid.segment.granularity" = "HOUR") -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@tstz1_n0 -POSTHOOK: query: create external table tstz1_n0(`__time` timestamp with local time zone, n string, v integer) -STORED BY 'org.apache.hadoop.hive.druid.DruidStorageHandler' -TBLPROPERTIES ("druid.segment.granularity" = "HOUR") -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@tstz1_n0 -PREHOOK: query: insert into table tstz1_n0 -values(cast('2016-01-03 12:26:34 America/Los_Angeles' as timestamp with local time zone), 'Bill', 10) -PREHOOK: type: QUERY -PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@tstz1_n0 -POSTHOOK: query: insert into table tstz1_n0 -values(cast('2016-01-03 12:26:34 America/Los_Angeles' as timestamp with local time zone), 'Bill', 10) -POSTHOOK: type: QUERY -POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@tstz1_n0 -PREHOOK: query: create table tstz1_n1(`__time` timestamp, n string, v integer) -STORED BY 'org.apache.hadoop.hive.druid.DruidStorageHandler' -TBLPROPERTIES ("druid.segment.granularity" = "HOUR") -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@tstz1_n1 -POSTHOOK: query: create table tstz1_n1(`__time` timestamp, n string, v integer) -STORED BY 'org.apache.hadoop.hive.druid.DruidStorageHandler' -TBLPROPERTIES ("druid.segment.granularity" = "HOUR") -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@tstz1_n1 -PREHOOK: query: insert into table tstz1_n1 -values(cast('2016-01-03 12:26:34' as timestamp), 'Bill', 10) -PREHOOK: type: QUERY -PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@tstz1_n1 -POSTHOOK: query: insert into table tstz1_n1 -values(cast('2016-01-03 12:26:34' as timestamp), 'Bill', 10) -POSTHOOK: type: QUERY -POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@tstz1_n1 -PREHOOK: query: EXPLAIN select `__time` from tstz1_n0 -PREHOOK: type: QUERY -PREHOOK: Input: default@tstz1_n0 -PREHOOK: Output: hdfs://### HDFS PATH ### -POSTHOOK: query: EXPLAIN select `__time` from tstz1_n0 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@tstz1_n0 -POSTHOOK: Output: hdfs://### HDFS PATH ### -STAGE DEPENDENCIES: - Stage-0 is a root stage - -STAGE PLANS: - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - TableScan - alias: tstz1_n0 - properties: - druid.fieldNames vc - druid.fieldTypes timestamp with local time zone - druid.query.json 
{"queryType":"scan","dataSource":"default.tstz1_n0","intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"virtualColumns":[{"type":"expression","name":"vc","expression":"\"__time\"","outputType":"LONG"}],"columns":["vc"],"resultFormat":"compactedList"} - druid.query.type scan - Select Operator - expressions: vc (type: timestamp with local time zone) - outputColumnNames: _col0 - ListSink - -PREHOOK: query: select `__time` from tstz1_n0 -PREHOOK: type: QUERY -PREHOOK: Input: default@tstz1_n0 -PREHOOK: Output: hdfs://### HDFS PATH ### -POSTHOOK: query: select `__time` from tstz1_n0 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@tstz1_n0 -POSTHOOK: Output: hdfs://### HDFS PATH ### -2016-01-03 12:26:34.0 US/Pacific -PREHOOK: query: EXPLAIN select cast(`__time` as timestamp) from tstz1_n0 -PREHOOK: type: QUERY -PREHOOK: Input: default@tstz1_n0 -PREHOOK: Output: hdfs://### HDFS PATH ### -POSTHOOK: query: EXPLAIN select cast(`__time` as timestamp) from tstz1_n0 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@tstz1_n0 -POSTHOOK: Output: hdfs://### HDFS PATH ### -STAGE DEPENDENCIES: - Stage-0 is a root stage - -STAGE PLANS: - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - TableScan - alias: tstz1_n0 - properties: - druid.fieldNames vc - druid.fieldTypes timestamp - druid.query.json {"queryType":"scan","dataSource":"default.tstz1_n0","intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"virtualColumns":[{"type":"expression","name":"vc","expression":"timestamp_parse(timestamp_format(\"__time\",'yyyy-MM-dd\\u0027T\\u0027HH:mm:ss.SSS\\u0027Z\\u0027','US/Pacific'),'','UTC')","outputType":"LONG"}],"columns":["vc"],"resultFormat":"compactedList"} - druid.query.type scan - Select Operator - expressions: vc (type: timestamp) - outputColumnNames: _col0 - ListSink - -PREHOOK: query: select cast(`__time` as timestamp) from tstz1_n0 -PREHOOK: type: QUERY -PREHOOK: Input: default@tstz1_n0 -PREHOOK: Output: hdfs://### HDFS PATH ### -POSTHOOK: query: select cast(`__time` as timestamp) from tstz1_n0 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@tstz1_n0 -POSTHOOK: Output: hdfs://### HDFS PATH ### -2016-01-03 12:26:34 -PREHOOK: query: EXPLAIN select cast(`__time` as timestamp) from tstz1_n0 where `__time` >= cast('2016-01-03 12:26:34 America/Los_Angeles' as timestamp with local time zone) -PREHOOK: type: QUERY -PREHOOK: Input: default@tstz1_n0 -PREHOOK: Output: hdfs://### HDFS PATH ### -POSTHOOK: query: EXPLAIN select cast(`__time` as timestamp) from tstz1_n0 where `__time` >= cast('2016-01-03 12:26:34 America/Los_Angeles' as timestamp with local time zone) -POSTHOOK: type: QUERY -POSTHOOK: Input: default@tstz1_n0 -POSTHOOK: Output: hdfs://### HDFS PATH ### -STAGE DEPENDENCIES: - Stage-0 is a root stage - -STAGE PLANS: - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - TableScan - alias: tstz1_n0 - properties: - druid.fieldNames vc - druid.fieldTypes timestamp - druid.query.json {"queryType":"scan","dataSource":"default.tstz1_n0","intervals":["2016-01-03T20:26:34.000Z/3000-01-01T00:00:00.000Z"],"virtualColumns":[{"type":"expression","name":"vc","expression":"timestamp_parse(timestamp_format(\"__time\",'yyyy-MM-dd\\u0027T\\u0027HH:mm:ss.SSS\\u0027Z\\u0027','US/Pacific'),'','UTC')","outputType":"LONG"}],"columns":["vc"],"resultFormat":"compactedList"} - druid.query.type scan - Select Operator - expressions: vc (type: timestamp) - outputColumnNames: _col0 - ListSink - -PREHOOK: query: select cast(`__time` as timestamp) from tstz1_n0 where `__time` >= 
cast('2016-01-03 12:26:34 America/Los_Angeles' as timestamp with local time zone) -PREHOOK: type: QUERY -PREHOOK: Input: default@tstz1_n0 -PREHOOK: Output: hdfs://### HDFS PATH ### -POSTHOOK: query: select cast(`__time` as timestamp) from tstz1_n0 where `__time` >= cast('2016-01-03 12:26:34 America/Los_Angeles' as timestamp with local time zone) -POSTHOOK: type: QUERY -POSTHOOK: Input: default@tstz1_n0 -POSTHOOK: Output: hdfs://### HDFS PATH ### -2016-01-03 12:26:34 -PREHOOK: query: EXPLAIN SELECT EXTRACT(HOUR FROM CAST(`__time` AS timestamp)) FROM tstz1_n0 -PREHOOK: type: QUERY -PREHOOK: Input: default@tstz1_n0 -PREHOOK: Output: hdfs://### HDFS PATH ### -POSTHOOK: query: EXPLAIN SELECT EXTRACT(HOUR FROM CAST(`__time` AS timestamp)) FROM tstz1_n0 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@tstz1_n0 -POSTHOOK: Output: hdfs://### HDFS PATH ### -STAGE DEPENDENCIES: - Stage-0 is a root stage - -STAGE PLANS: - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - TableScan - alias: tstz1_n0 - properties: - druid.fieldNames vc - druid.fieldTypes int - druid.query.json {"queryType":"scan","dataSource":"default.tstz1_n0","intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"virtualColumns":[{"type":"expression","name":"vc","expression":"timestamp_extract(timestamp_parse(timestamp_format(\"__time\",'yyyy-MM-dd\\u0027T\\u0027HH:mm:ss.SSS\\u0027Z\\u0027','US/Pacific'),'','UTC'),'HOUR','UTC')","outputType":"LONG"}],"columns":["vc"],"resultFormat":"compactedList"} - druid.query.type scan - Select Operator - expressions: vc (type: int) - outputColumnNames: _col0 - ListSink - -PREHOOK: query: SELECT EXTRACT(HOUR FROM CAST(`__time` AS timestamp)) FROM tstz1_n0 -PREHOOK: type: QUERY -PREHOOK: Input: default@tstz1_n0 -PREHOOK: Output: hdfs://### HDFS PATH ### -POSTHOOK: query: SELECT EXTRACT(HOUR FROM CAST(`__time` AS timestamp)) FROM tstz1_n0 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@tstz1_n0 -POSTHOOK: Output: hdfs://### HDFS PATH ### -12 -PREHOOK: query: EXPLAIN SELECT FLOOR(CAST(`__time` AS timestamp) to HOUR) FROM tstz1_n0 -PREHOOK: type: QUERY -PREHOOK: Input: default@tstz1_n0 -PREHOOK: Output: hdfs://### HDFS PATH ### -POSTHOOK: query: EXPLAIN SELECT FLOOR(CAST(`__time` AS timestamp) to HOUR) FROM tstz1_n0 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@tstz1_n0 -POSTHOOK: Output: hdfs://### HDFS PATH ### -STAGE DEPENDENCIES: - Stage-0 is a root stage - -STAGE PLANS: - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - TableScan - alias: tstz1_n0 - properties: - druid.fieldNames vc - druid.fieldTypes timestamp - druid.query.json {"queryType":"scan","dataSource":"default.tstz1_n0","intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"virtualColumns":[{"type":"expression","name":"vc","expression":"timestamp_floor(timestamp_parse(timestamp_format(\"__time\",'yyyy-MM-dd\\u0027T\\u0027HH:mm:ss.SSS\\u0027Z\\u0027','US/Pacific'),'','UTC'),'PT1H','','UTC')","outputType":"LONG"}],"columns":["vc"],"resultFormat":"compactedList"} - druid.query.type scan - Select Operator - expressions: vc (type: timestamp) - outputColumnNames: _col0 - ListSink - -PREHOOK: query: SELECT FLOOR(CAST(`__time` AS timestamp) to HOUR) FROM tstz1_n0 -PREHOOK: type: QUERY -PREHOOK: Input: default@tstz1_n0 -PREHOOK: Output: hdfs://### HDFS PATH ### -POSTHOOK: query: SELECT FLOOR(CAST(`__time` AS timestamp) to HOUR) FROM tstz1_n0 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@tstz1_n0 -POSTHOOK: Output: hdfs://### HDFS PATH ### -2016-01-03 12:00:00 -PREHOOK: query: EXPLAIN 
SELECT `__time`, max(v) FROM tstz1_n0 GROUP BY `__time` -PREHOOK: type: QUERY -PREHOOK: Input: default@tstz1_n0 -PREHOOK: Output: hdfs://### HDFS PATH ### -POSTHOOK: query: EXPLAIN SELECT `__time`, max(v) FROM tstz1_n0 GROUP BY `__time` -POSTHOOK: type: QUERY -POSTHOOK: Input: default@tstz1_n0 -POSTHOOK: Output: hdfs://### HDFS PATH ### -STAGE DEPENDENCIES: - Stage-0 is a root stage - -STAGE PLANS: - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - TableScan - alias: tstz1_n0 - properties: - druid.fieldNames extract,$f1 - druid.fieldTypes timestamp with local time zone,int - druid.query.json {"queryType":"groupBy","dataSource":"default.tstz1_n0","granularity":"all","dimensions":[{"type":"extraction","dimension":"__time","outputName":"extract","extractionFn":{"type":"timeFormat","format":"yyyy-MM-dd'T'HH:mm:ss.SSS'Z'","timeZone":"UTC"}}],"limitSpec":{"type":"default"},"aggregations":[{"type":"longMax","name":"$f1","fieldName":"v"}],"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"]} - druid.query.type groupBy - Select Operator - expressions: extract (type: timestamp with local time zone), $f1 (type: int) - outputColumnNames: _col0, _col1 - ListSink - -PREHOOK: query: SELECT `__time`, max(v) FROM tstz1_n0 GROUP BY `__time` -PREHOOK: type: QUERY -PREHOOK: Input: default@tstz1_n0 -PREHOOK: Output: hdfs://### HDFS PATH ### -POSTHOOK: query: SELECT `__time`, max(v) FROM tstz1_n0 GROUP BY `__time` -POSTHOOK: type: QUERY -POSTHOOK: Input: default@tstz1_n0 -POSTHOOK: Output: hdfs://### HDFS PATH ### -2016-01-03 12:26:34.0 US/Pacific 10 -PREHOOK: query: EXPLAIN select `__time` from tstz1_n1 -PREHOOK: type: QUERY -PREHOOK: Input: default@tstz1_n1 -PREHOOK: Output: hdfs://### HDFS PATH ### -POSTHOOK: query: EXPLAIN select `__time` from tstz1_n1 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@tstz1_n1 -POSTHOOK: Output: hdfs://### HDFS PATH ### -STAGE DEPENDENCIES: - Stage-0 is a root stage - -STAGE PLANS: - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - TableScan - alias: tstz1_n1 - properties: - druid.fieldNames vc - druid.fieldTypes timestamp - druid.query.json {"queryType":"scan","dataSource":"default.tstz1_n1","intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"virtualColumns":[{"type":"expression","name":"vc","expression":"\"__time\"","outputType":"LONG"}],"columns":["vc"],"resultFormat":"compactedList"} - druid.query.type scan - Select Operator - expressions: vc (type: timestamp) - outputColumnNames: _col0 - ListSink - -PREHOOK: query: select `__time` from tstz1_n1 -PREHOOK: type: QUERY -PREHOOK: Input: default@tstz1_n1 -PREHOOK: Output: hdfs://### HDFS PATH ### -POSTHOOK: query: select `__time` from tstz1_n1 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@tstz1_n1 -POSTHOOK: Output: hdfs://### HDFS PATH ### -2016-01-03 12:26:34 -PREHOOK: query: EXPLAIN SELECT EXTRACT(HOUR FROM CAST(`__time` AS timestamp)) FROM tstz1_n1 -PREHOOK: type: QUERY -PREHOOK: Input: default@tstz1_n1 -PREHOOK: Output: hdfs://### HDFS PATH ### -POSTHOOK: query: EXPLAIN SELECT EXTRACT(HOUR FROM CAST(`__time` AS timestamp)) FROM tstz1_n1 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@tstz1_n1 -POSTHOOK: Output: hdfs://### HDFS PATH ### -STAGE DEPENDENCIES: - Stage-0 is a root stage - -STAGE PLANS: - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - TableScan - alias: tstz1_n1 - properties: - druid.fieldNames vc - druid.fieldTypes int - druid.query.json 
{"queryType":"scan","dataSource":"default.tstz1_n1","intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"virtualColumns":[{"type":"expression","name":"vc","expression":"timestamp_extract(\"__time\",'HOUR','UTC')","outputType":"LONG"}],"columns":["vc"],"resultFormat":"compactedList"} - druid.query.type scan - Select Operator - expressions: vc (type: int) - outputColumnNames: _col0 - ListSink - -PREHOOK: query: SELECT EXTRACT(HOUR FROM CAST(`__time` AS timestamp)) FROM tstz1_n1 -PREHOOK: type: QUERY -PREHOOK: Input: default@tstz1_n1 -PREHOOK: Output: hdfs://### HDFS PATH ### -POSTHOOK: query: SELECT EXTRACT(HOUR FROM CAST(`__time` AS timestamp)) FROM tstz1_n1 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@tstz1_n1 -POSTHOOK: Output: hdfs://### HDFS PATH ### -12 -PREHOOK: query: EXPLAIN SELECT FLOOR(CAST(`__time` AS timestamp) to HOUR) FROM tstz1_n1 -PREHOOK: type: QUERY -PREHOOK: Input: default@tstz1_n1 -PREHOOK: Output: hdfs://### HDFS PATH ### -POSTHOOK: query: EXPLAIN SELECT FLOOR(CAST(`__time` AS timestamp) to HOUR) FROM tstz1_n1 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@tstz1_n1 -POSTHOOK: Output: hdfs://### HDFS PATH ### -STAGE DEPENDENCIES: - Stage-0 is a root stage - -STAGE PLANS: - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - TableScan - alias: tstz1_n1 - properties: - druid.fieldNames vc - druid.fieldTypes timestamp - druid.query.json {"queryType":"scan","dataSource":"default.tstz1_n1","intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"virtualColumns":[{"type":"expression","name":"vc","expression":"timestamp_floor(\"__time\",'PT1H','','UTC')","outputType":"LONG"}],"columns":["vc"],"resultFormat":"compactedList"} - druid.query.type scan - Select Operator - expressions: vc (type: timestamp) - outputColumnNames: _col0 - ListSink - -PREHOOK: query: SELECT FLOOR(CAST(`__time` AS timestamp) to HOUR) FROM tstz1_n1 -PREHOOK: type: QUERY -PREHOOK: Input: default@tstz1_n1 -PREHOOK: Output: hdfs://### HDFS PATH ### -POSTHOOK: query: SELECT FLOOR(CAST(`__time` AS timestamp) to HOUR) FROM tstz1_n1 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@tstz1_n1 -POSTHOOK: Output: hdfs://### HDFS PATH ### -2016-01-03 12:00:00 -PREHOOK: query: EXPLAIN SELECT `__time`, max(v) FROM tstz1_n1 GROUP BY `__time` -PREHOOK: type: QUERY -PREHOOK: Input: default@tstz1_n1 -PREHOOK: Output: hdfs://### HDFS PATH ### -POSTHOOK: query: EXPLAIN SELECT `__time`, max(v) FROM tstz1_n1 GROUP BY `__time` -POSTHOOK: type: QUERY -POSTHOOK: Input: default@tstz1_n1 -POSTHOOK: Output: hdfs://### HDFS PATH ### -STAGE DEPENDENCIES: - Stage-0 is a root stage - -STAGE PLANS: - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - TableScan - alias: tstz1_n1 - properties: - druid.fieldNames extract,$f1 - druid.fieldTypes timestamp,int - druid.query.json {"queryType":"groupBy","dataSource":"default.tstz1_n1","granularity":"all","dimensions":[{"type":"extraction","dimension":"__time","outputName":"extract","extractionFn":{"type":"timeFormat","format":"yyyy-MM-dd'T'HH:mm:ss.SSS'Z'","timeZone":"UTC"}}],"limitSpec":{"type":"default"},"aggregations":[{"type":"longMax","name":"$f1","fieldName":"v"}],"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"]} - druid.query.type groupBy - Select Operator - expressions: extract (type: timestamp), $f1 (type: int) - outputColumnNames: _col0, _col1 - ListSink - -PREHOOK: query: SELECT `__time`, max(v) FROM tstz1_n1 GROUP BY `__time` -PREHOOK: type: QUERY -PREHOOK: Input: default@tstz1_n1 -PREHOOK: Output: hdfs://### HDFS 
PATH ### -POSTHOOK: query: SELECT `__time`, max(v) FROM tstz1_n1 GROUP BY `__time` -POSTHOOK: type: QUERY -POSTHOOK: Input: default@tstz1_n1 -POSTHOOK: Output: hdfs://### HDFS PATH ### -2016-01-03 12:26:34 10 -PREHOOK: query: EXPLAIN select `__time` from tstz1_n0 -PREHOOK: type: QUERY -PREHOOK: Input: default@tstz1_n0 -PREHOOK: Output: hdfs://### HDFS PATH ### -POSTHOOK: query: EXPLAIN select `__time` from tstz1_n0 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@tstz1_n0 -POSTHOOK: Output: hdfs://### HDFS PATH ### -STAGE DEPENDENCIES: - Stage-0 is a root stage - -STAGE PLANS: - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - TableScan - alias: tstz1_n0 - properties: - druid.fieldNames vc - druid.fieldTypes timestamp with local time zone - druid.query.json {"queryType":"scan","dataSource":"default.tstz1_n0","intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"virtualColumns":[{"type":"expression","name":"vc","expression":"\"__time\"","outputType":"LONG"}],"columns":["vc"],"resultFormat":"compactedList"} - druid.query.type scan - Select Operator - expressions: vc (type: timestamp with local time zone) - outputColumnNames: _col0 - ListSink - -PREHOOK: query: select `__time` from tstz1_n0 -PREHOOK: type: QUERY -PREHOOK: Input: default@tstz1_n0 -PREHOOK: Output: hdfs://### HDFS PATH ### -POSTHOOK: query: select `__time` from tstz1_n0 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@tstz1_n0 -POSTHOOK: Output: hdfs://### HDFS PATH ### -2016-01-03 20:26:34.0 UTC -PREHOOK: query: EXPLAIN select cast(`__time` as timestamp) from tstz1_n0 -PREHOOK: type: QUERY -PREHOOK: Input: default@tstz1_n0 -PREHOOK: Output: hdfs://### HDFS PATH ### -POSTHOOK: query: EXPLAIN select cast(`__time` as timestamp) from tstz1_n0 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@tstz1_n0 -POSTHOOK: Output: hdfs://### HDFS PATH ### -STAGE DEPENDENCIES: - Stage-0 is a root stage - -STAGE PLANS: - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - TableScan - alias: tstz1_n0 - properties: - druid.fieldNames vc - druid.fieldTypes timestamp - druid.query.json {"queryType":"scan","dataSource":"default.tstz1_n0","intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"virtualColumns":[{"type":"expression","name":"vc","expression":"\"__time\"","outputType":"LONG"}],"columns":["vc"],"resultFormat":"compactedList"} - druid.query.type scan - Select Operator - expressions: vc (type: timestamp) - outputColumnNames: _col0 - ListSink - -PREHOOK: query: select cast(`__time` as timestamp) from tstz1_n0 -PREHOOK: type: QUERY -PREHOOK: Input: default@tstz1_n0 -PREHOOK: Output: hdfs://### HDFS PATH ### -POSTHOOK: query: select cast(`__time` as timestamp) from tstz1_n0 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@tstz1_n0 -POSTHOOK: Output: hdfs://### HDFS PATH ### -2016-01-03 20:26:34 -PREHOOK: query: EXPLAIN select cast(`__time` as timestamp) from tstz1_n0 where `__time` >= cast('2016-01-03 12:26:34 America/Los_Angeles' as timestamp with local time zone) -PREHOOK: type: QUERY -PREHOOK: Input: default@tstz1_n0 -PREHOOK: Output: hdfs://### HDFS PATH ### -POSTHOOK: query: EXPLAIN select cast(`__time` as timestamp) from tstz1_n0 where `__time` >= cast('2016-01-03 12:26:34 America/Los_Angeles' as timestamp with local time zone) -POSTHOOK: type: QUERY -POSTHOOK: Input: default@tstz1_n0 -POSTHOOK: Output: hdfs://### HDFS PATH ### -STAGE DEPENDENCIES: - Stage-0 is a root stage - -STAGE PLANS: - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - TableScan - alias: tstz1_n0 - 
properties: - druid.fieldNames vc - druid.fieldTypes timestamp - druid.query.json {"queryType":"scan","dataSource":"default.tstz1_n0","intervals":["2016-01-03T20:26:34.000Z/3000-01-01T00:00:00.000Z"],"virtualColumns":[{"type":"expression","name":"vc","expression":"\"__time\"","outputType":"LONG"}],"columns":["vc"],"resultFormat":"compactedList"} - druid.query.type scan - Select Operator - expressions: vc (type: timestamp) - outputColumnNames: _col0 - ListSink - -PREHOOK: query: select cast(`__time` as timestamp) from tstz1_n0 where `__time` >= cast('2016-01-03 12:26:34 America/Los_Angeles' as timestamp with local time zone) -PREHOOK: type: QUERY -PREHOOK: Input: default@tstz1_n0 -PREHOOK: Output: hdfs://### HDFS PATH ### -POSTHOOK: query: select cast(`__time` as timestamp) from tstz1_n0 where `__time` >= cast('2016-01-03 12:26:34 America/Los_Angeles' as timestamp with local time zone) -POSTHOOK: type: QUERY -POSTHOOK: Input: default@tstz1_n0 -POSTHOOK: Output: hdfs://### HDFS PATH ### -2016-01-03 20:26:34 -PREHOOK: query: EXPLAIN select cast(`__time` as timestamp) from tstz1_n0 where `__time` >= cast('2016-01-03 20:26:34' as timestamp) -PREHOOK: type: QUERY -PREHOOK: Input: default@tstz1_n0 -PREHOOK: Output: hdfs://### HDFS PATH ### -POSTHOOK: query: EXPLAIN select cast(`__time` as timestamp) from tstz1_n0 where `__time` >= cast('2016-01-03 20:26:34' as timestamp) -POSTHOOK: type: QUERY -POSTHOOK: Input: default@tstz1_n0 -POSTHOOK: Output: hdfs://### HDFS PATH ### -STAGE DEPENDENCIES: - Stage-0 is a root stage - -STAGE PLANS: - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - TableScan - alias: tstz1_n0 - properties: - druid.fieldNames vc - druid.fieldTypes timestamp - druid.query.json {"queryType":"scan","dataSource":"default.tstz1_n0","intervals":["2016-01-03T20:26:34.000Z/3000-01-01T00:00:00.000Z"],"virtualColumns":[{"type":"expression","name":"vc","expression":"\"__time\"","outputType":"LONG"}],"columns":["vc"],"resultFormat":"compactedList"} - druid.query.type scan - Select Operator - expressions: vc (type: timestamp) - outputColumnNames: _col0 - ListSink - -PREHOOK: query: select cast(`__time` as timestamp) from tstz1_n0 where `__time` >= cast('2016-01-03 20:26:34' as timestamp) -PREHOOK: type: QUERY -PREHOOK: Input: default@tstz1_n0 -PREHOOK: Output: hdfs://### HDFS PATH ### -POSTHOOK: query: select cast(`__time` as timestamp) from tstz1_n0 where `__time` >= cast('2016-01-03 20:26:34' as timestamp) -POSTHOOK: type: QUERY -POSTHOOK: Input: default@tstz1_n0 -POSTHOOK: Output: hdfs://### HDFS PATH ### -2016-01-03 20:26:34 -PREHOOK: query: EXPLAIN select cast(`__time` as timestamp) from tstz1_n0 where `__time` >= cast('2016-01-03 12:26:34 America/Los_Angeles' as timestamp with local time zone) AND `__time` <= cast('2016-01-03 12:26:34 America/Los_Angeles' as timestamp with local time zone) -PREHOOK: type: QUERY -PREHOOK: Input: default@tstz1_n0 -PREHOOK: Output: hdfs://### HDFS PATH ### -POSTHOOK: query: EXPLAIN select cast(`__time` as timestamp) from tstz1_n0 where `__time` >= cast('2016-01-03 12:26:34 America/Los_Angeles' as timestamp with local time zone) AND `__time` <= cast('2016-01-03 12:26:34 America/Los_Angeles' as timestamp with local time zone) -POSTHOOK: type: QUERY -POSTHOOK: Input: default@tstz1_n0 -POSTHOOK: Output: hdfs://### HDFS PATH ### -STAGE DEPENDENCIES: - Stage-0 is a root stage - -STAGE PLANS: - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - TableScan - alias: tstz1_n0 - properties: - druid.fieldNames vc - druid.fieldTypes timestamp 
- druid.query.json {"queryType":"scan","dataSource":"default.tstz1_n0","intervals":["2016-01-03T20:26:34.000Z/2016-01-03T20:26:34.001Z"],"virtualColumns":[{"type":"expression","name":"vc","expression":"1451852794000","outputType":"LONG"}],"columns":["vc"],"resultFormat":"compactedList"} - druid.query.type scan - Select Operator - expressions: vc (type: timestamp) - outputColumnNames: _col0 - ListSink - -PREHOOK: query: select cast(`__time` as timestamp) from tstz1_n0 where `__time` >= cast('2016-01-03 12:26:34 America/Los_Angeles' as timestamp with local time zone) AND `__time` <= cast('2016-01-03 12:26:34 America/Los_Angeles' as timestamp with local time zone) -PREHOOK: type: QUERY -PREHOOK: Input: default@tstz1_n0 -PREHOOK: Output: hdfs://### HDFS PATH ### -POSTHOOK: query: select cast(`__time` as timestamp) from tstz1_n0 where `__time` >= cast('2016-01-03 12:26:34 America/Los_Angeles' as timestamp with local time zone) AND `__time` <= cast('2016-01-03 12:26:34 America/Los_Angeles' as timestamp with local time zone) -POSTHOOK: type: QUERY -POSTHOOK: Input: default@tstz1_n0 -POSTHOOK: Output: hdfs://### HDFS PATH ### -2016-01-03 20:26:34 -PREHOOK: query: EXPLAIN SELECT EXTRACT(HOUR FROM CAST(`__time` AS timestamp)) FROM tstz1_n0 -PREHOOK: type: QUERY -PREHOOK: Input: default@tstz1_n0 -PREHOOK: Output: hdfs://### HDFS PATH ### -POSTHOOK: query: EXPLAIN SELECT EXTRACT(HOUR FROM CAST(`__time` AS timestamp)) FROM tstz1_n0 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@tstz1_n0 -POSTHOOK: Output: hdfs://### HDFS PATH ### -STAGE DEPENDENCIES: - Stage-0 is a root stage - -STAGE PLANS: - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - TableScan - alias: tstz1_n0 - properties: - druid.fieldNames vc - druid.fieldTypes int - druid.query.json {"queryType":"scan","dataSource":"default.tstz1_n0","intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"virtualColumns":[{"type":"expression","name":"vc","expression":"timestamp_extract(\"__time\",'HOUR','UTC')","outputType":"LONG"}],"columns":["vc"],"resultFormat":"compactedList"} - druid.query.type scan - Select Operator - expressions: vc (type: int) - outputColumnNames: _col0 - ListSink - -PREHOOK: query: SELECT EXTRACT(HOUR FROM CAST(`__time` AS timestamp)) FROM tstz1_n0 -PREHOOK: type: QUERY -PREHOOK: Input: default@tstz1_n0 -PREHOOK: Output: hdfs://### HDFS PATH ### -POSTHOOK: query: SELECT EXTRACT(HOUR FROM CAST(`__time` AS timestamp)) FROM tstz1_n0 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@tstz1_n0 -POSTHOOK: Output: hdfs://### HDFS PATH ### -20 -PREHOOK: query: EXPLAIN SELECT FLOOR(CAST(`__time` AS timestamp) to HOUR) FROM tstz1_n0 -PREHOOK: type: QUERY -PREHOOK: Input: default@tstz1_n0 -PREHOOK: Output: hdfs://### HDFS PATH ### -POSTHOOK: query: EXPLAIN SELECT FLOOR(CAST(`__time` AS timestamp) to HOUR) FROM tstz1_n0 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@tstz1_n0 -POSTHOOK: Output: hdfs://### HDFS PATH ### -STAGE DEPENDENCIES: - Stage-0 is a root stage - -STAGE PLANS: - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - TableScan - alias: tstz1_n0 - properties: - druid.fieldNames vc - druid.fieldTypes timestamp - druid.query.json {"queryType":"scan","dataSource":"default.tstz1_n0","intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"virtualColumns":[{"type":"expression","name":"vc","expression":"timestamp_floor(\"__time\",'PT1H','','UTC')","outputType":"LONG"}],"columns":["vc"],"resultFormat":"compactedList"} - druid.query.type scan - Select Operator - expressions: vc (type: 
timestamp) - outputColumnNames: _col0 - ListSink - -PREHOOK: query: SELECT FLOOR(CAST(`__time` AS timestamp) to HOUR) FROM tstz1_n0 -PREHOOK: type: QUERY -PREHOOK: Input: default@tstz1_n0 -PREHOOK: Output: hdfs://### HDFS PATH ### -POSTHOOK: query: SELECT FLOOR(CAST(`__time` AS timestamp) to HOUR) FROM tstz1_n0 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@tstz1_n0 -POSTHOOK: Output: hdfs://### HDFS PATH ### -2016-01-03 20:00:00 -PREHOOK: query: EXPLAIN SELECT `__time`, max(v) FROM tstz1_n0 GROUP BY `__time` -PREHOOK: type: QUERY -PREHOOK: Input: default@tstz1_n0 -PREHOOK: Output: hdfs://### HDFS PATH ### -POSTHOOK: query: EXPLAIN SELECT `__time`, max(v) FROM tstz1_n0 GROUP BY `__time` -POSTHOOK: type: QUERY -POSTHOOK: Input: default@tstz1_n0 -POSTHOOK: Output: hdfs://### HDFS PATH ### -STAGE DEPENDENCIES: - Stage-0 is a root stage - -STAGE PLANS: - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - TableScan - alias: tstz1_n0 - properties: - druid.fieldNames extract,$f1 - druid.fieldTypes timestamp with local time zone,int - druid.query.json {"queryType":"groupBy","dataSource":"default.tstz1_n0","granularity":"all","dimensions":[{"type":"extraction","dimension":"__time","outputName":"extract","extractionFn":{"type":"timeFormat","format":"yyyy-MM-dd'T'HH:mm:ss.SSS'Z'","timeZone":"UTC"}}],"limitSpec":{"type":"default"},"aggregations":[{"type":"longMax","name":"$f1","fieldName":"v"}],"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"]} - druid.query.type groupBy - Select Operator - expressions: extract (type: timestamp with local time zone), $f1 (type: int) - outputColumnNames: _col0, _col1 - ListSink - -PREHOOK: query: SELECT `__time`, max(v) FROM tstz1_n0 GROUP BY `__time` -PREHOOK: type: QUERY -PREHOOK: Input: default@tstz1_n0 -PREHOOK: Output: hdfs://### HDFS PATH ### -POSTHOOK: query: SELECT `__time`, max(v) FROM tstz1_n0 GROUP BY `__time` -POSTHOOK: type: QUERY -POSTHOOK: Input: default@tstz1_n0 -POSTHOOK: Output: hdfs://### HDFS PATH ### -2016-01-03 20:26:34.0 UTC 10 -PREHOOK: query: EXPLAIN select `__time` from tstz1_n1 -PREHOOK: type: QUERY -PREHOOK: Input: default@tstz1_n1 -PREHOOK: Output: hdfs://### HDFS PATH ### -POSTHOOK: query: EXPLAIN select `__time` from tstz1_n1 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@tstz1_n1 -POSTHOOK: Output: hdfs://### HDFS PATH ### -STAGE DEPENDENCIES: - Stage-0 is a root stage - -STAGE PLANS: - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - TableScan - alias: tstz1_n1 - properties: - druid.fieldNames vc - druid.fieldTypes timestamp - druid.query.json {"queryType":"scan","dataSource":"default.tstz1_n1","intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"virtualColumns":[{"type":"expression","name":"vc","expression":"\"__time\"","outputType":"LONG"}],"columns":["vc"],"resultFormat":"compactedList"} - druid.query.type scan - Select Operator - expressions: vc (type: timestamp) - outputColumnNames: _col0 - ListSink - -PREHOOK: query: select `__time` from tstz1_n1 -PREHOOK: type: QUERY -PREHOOK: Input: default@tstz1_n1 -PREHOOK: Output: hdfs://### HDFS PATH ### -POSTHOOK: query: select `__time` from tstz1_n1 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@tstz1_n1 -POSTHOOK: Output: hdfs://### HDFS PATH ### -2016-01-03 12:26:34 -PREHOOK: query: EXPLAIN SELECT EXTRACT(HOUR FROM CAST(`__time` AS timestamp)) FROM tstz1_n1 -PREHOOK: type: QUERY -PREHOOK: Input: default@tstz1_n1 -PREHOOK: Output: hdfs://### HDFS PATH ### -POSTHOOK: query: EXPLAIN SELECT EXTRACT(HOUR FROM CAST(`__time` AS 
timestamp)) FROM tstz1_n1 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@tstz1_n1 -POSTHOOK: Output: hdfs://### HDFS PATH ### -STAGE DEPENDENCIES: - Stage-0 is a root stage - -STAGE PLANS: - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - TableScan - alias: tstz1_n1 - properties: - druid.fieldNames vc - druid.fieldTypes int - druid.query.json {"queryType":"scan","dataSource":"default.tstz1_n1","intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"virtualColumns":[{"type":"expression","name":"vc","expression":"timestamp_extract(\"__time\",'HOUR','UTC')","outputType":"LONG"}],"columns":["vc"],"resultFormat":"compactedList"} - druid.query.type scan - Select Operator - expressions: vc (type: int) - outputColumnNames: _col0 - ListSink - -PREHOOK: query: SELECT EXTRACT(HOUR FROM CAST(`__time` AS timestamp)) FROM tstz1_n1 -PREHOOK: type: QUERY -PREHOOK: Input: default@tstz1_n1 -PREHOOK: Output: hdfs://### HDFS PATH ### -POSTHOOK: query: SELECT EXTRACT(HOUR FROM CAST(`__time` AS timestamp)) FROM tstz1_n1 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@tstz1_n1 -POSTHOOK: Output: hdfs://### HDFS PATH ### -12 -PREHOOK: query: EXPLAIN SELECT FLOOR(CAST(`__time` AS timestamp) to HOUR) FROM tstz1_n1 -PREHOOK: type: QUERY -PREHOOK: Input: default@tstz1_n1 -PREHOOK: Output: hdfs://### HDFS PATH ### -POSTHOOK: query: EXPLAIN SELECT FLOOR(CAST(`__time` AS timestamp) to HOUR) FROM tstz1_n1 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@tstz1_n1 -POSTHOOK: Output: hdfs://### HDFS PATH ### -STAGE DEPENDENCIES: - Stage-0 is a root stage - -STAGE PLANS: - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - TableScan - alias: tstz1_n1 - properties: - druid.fieldNames vc - druid.fieldTypes timestamp - druid.query.json {"queryType":"scan","dataSource":"default.tstz1_n1","intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"virtualColumns":[{"type":"expression","name":"vc","expression":"timestamp_floor(\"__time\",'PT1H','','UTC')","outputType":"LONG"}],"columns":["vc"],"resultFormat":"compactedList"} - druid.query.type scan - Select Operator - expressions: vc (type: timestamp) - outputColumnNames: _col0 - ListSink - -PREHOOK: query: SELECT FLOOR(CAST(`__time` AS timestamp) to HOUR) FROM tstz1_n1 -PREHOOK: type: QUERY -PREHOOK: Input: default@tstz1_n1 -PREHOOK: Output: hdfs://### HDFS PATH ### -POSTHOOK: query: SELECT FLOOR(CAST(`__time` AS timestamp) to HOUR) FROM tstz1_n1 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@tstz1_n1 -POSTHOOK: Output: hdfs://### HDFS PATH ### -2016-01-03 12:00:00 -PREHOOK: query: EXPLAIN SELECT `__time`, max(v) FROM tstz1_n1 GROUP BY `__time` -PREHOOK: type: QUERY -PREHOOK: Input: default@tstz1_n1 -PREHOOK: Output: hdfs://### HDFS PATH ### -POSTHOOK: query: EXPLAIN SELECT `__time`, max(v) FROM tstz1_n1 GROUP BY `__time` -POSTHOOK: type: QUERY -POSTHOOK: Input: default@tstz1_n1 -POSTHOOK: Output: hdfs://### HDFS PATH ### -STAGE DEPENDENCIES: - Stage-0 is a root stage - -STAGE PLANS: - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - TableScan - alias: tstz1_n1 - properties: - druid.fieldNames extract,$f1 - druid.fieldTypes timestamp,int - druid.query.json 
{"queryType":"groupBy","dataSource":"default.tstz1_n1","granularity":"all","dimensions":[{"type":"extraction","dimension":"__time","outputName":"extract","extractionFn":{"type":"timeFormat","format":"yyyy-MM-dd'T'HH:mm:ss.SSS'Z'","timeZone":"UTC"}}],"limitSpec":{"type":"default"},"aggregations":[{"type":"longMax","name":"$f1","fieldName":"v"}],"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"]} - druid.query.type groupBy - Select Operator - expressions: extract (type: timestamp), $f1 (type: int) - outputColumnNames: _col0, _col1 - ListSink - -PREHOOK: query: SELECT `__time`, max(v) FROM tstz1_n1 GROUP BY `__time` -PREHOOK: type: QUERY -PREHOOK: Input: default@tstz1_n1 -PREHOOK: Output: hdfs://### HDFS PATH ### -POSTHOOK: query: SELECT `__time`, max(v) FROM tstz1_n1 GROUP BY `__time` -POSTHOOK: type: QUERY -POSTHOOK: Input: default@tstz1_n1 -POSTHOOK: Output: hdfs://### HDFS PATH ### -2016-01-03 12:26:34 10 diff --git ql/src/test/results/clientpositive/druid/druidmini_joins.q.out ql/src/test/results/clientpositive/druid/druidmini_joins.q.out deleted file mode 100644 index 6c9a9642ab..0000000000 --- ql/src/test/results/clientpositive/druid/druidmini_joins.q.out +++ /dev/null @@ -1,248 +0,0 @@ -PREHOOK: query: DROP TABLE druid_table_with_nulls -PREHOOK: type: DROPTABLE -POSTHOOK: query: DROP TABLE druid_table_with_nulls -POSTHOOK: type: DROPTABLE -PREHOOK: query: CREATE EXTERNAL TABLE druid_table_with_nulls -STORED BY 'org.apache.hadoop.hive.druid.DruidStorageHandler' -TBLPROPERTIES ("druid.segment.granularity" = "HOUR") -AS -SELECT cast(current_timestamp() AS timestamp with local time zone) AS `__time`, - cast(username AS string) AS username, - cast(double1 AS double) AS double1, - cast(int1 AS int) AS int1 -FROM TABLE ( - VALUES - ('alfred', 10.30, 2), - ('bob', 3.14, null), - ('bonnie', null, 3), - ('calvin', null, null), - ('charlie', 9.8, 1), - ('charlie', 15.8, 1)) as q (username, double1, int1) -PREHOOK: type: CREATETABLE_AS_SELECT -PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: database:default -PREHOOK: Output: default@druid_table_with_nulls -POSTHOOK: query: CREATE EXTERNAL TABLE druid_table_with_nulls -STORED BY 'org.apache.hadoop.hive.druid.DruidStorageHandler' -TBLPROPERTIES ("druid.segment.granularity" = "HOUR") -AS -SELECT cast(current_timestamp() AS timestamp with local time zone) AS `__time`, - cast(username AS string) AS username, - cast(double1 AS double) AS double1, - cast(int1 AS int) AS int1 -FROM TABLE ( - VALUES - ('alfred', 10.30, 2), - ('bob', 3.14, null), - ('bonnie', null, 3), - ('calvin', null, null), - ('charlie', 9.8, 1), - ('charlie', 15.8, 1)) as q (username, double1, int1) -POSTHOOK: type: CREATETABLE_AS_SELECT -POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: database:default -POSTHOOK: Output: default@druid_table_with_nulls -POSTHOOK: Lineage: druid_table_with_nulls.__time SIMPLE [] -POSTHOOK: Lineage: druid_table_with_nulls.double1 SCRIPT [] -POSTHOOK: Lineage: druid_table_with_nulls.int1 SCRIPT [] -POSTHOOK: Lineage: druid_table_with_nulls.username SCRIPT [] -PREHOOK: query: EXPLAIN SELECT -username AS `username`, -SUM(double1) AS `sum_double1` -FROM -druid_table_with_nulls `tbl1` - JOIN ( - SELECT - username AS `username`, - SUM(double1) AS `sum_double2` - FROM druid_table_with_nulls - GROUP BY `username` - ORDER BY `sum_double2` - DESC LIMIT 10 - ) - `tbl2` - ON (`tbl1`.`username` = `tbl2`.`username`) -GROUP BY `tbl1`.`username` ORDER BY `sum_double1` -PREHOOK: type: QUERY -PREHOOK: Input: 
default@druid_table_with_nulls -PREHOOK: Output: hdfs://### HDFS PATH ### -POSTHOOK: query: EXPLAIN SELECT -username AS `username`, -SUM(double1) AS `sum_double1` -FROM -druid_table_with_nulls `tbl1` - JOIN ( - SELECT - username AS `username`, - SUM(double1) AS `sum_double2` - FROM druid_table_with_nulls - GROUP BY `username` - ORDER BY `sum_double2` - DESC LIMIT 10 - ) - `tbl2` - ON (`tbl1`.`username` = `tbl2`.`username`) -GROUP BY `tbl1`.`username` ORDER BY `sum_double1` -POSTHOOK: type: QUERY -POSTHOOK: Input: default@druid_table_with_nulls -POSTHOOK: Output: hdfs://### HDFS PATH ### -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-1 - Tez -#### A masked pattern was here #### - Edges: - Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 5 (SIMPLE_EDGE) - Reducer 3 <- Reducer 2 (SIMPLE_EDGE) - Reducer 4 <- Reducer 3 (SIMPLE_EDGE) -#### A masked pattern was here #### - Vertices: - Map 1 - Map Operator Tree: - TableScan - alias: tbl1 - properties: - druid.fieldNames username,$f1 - druid.fieldTypes string,double - druid.query.json {"queryType":"groupBy","dataSource":"default.druid_table_with_nulls","granularity":"all","dimensions":[{"type":"default","dimension":"username","outputName":"username","outputType":"STRING"}],"limitSpec":{"type":"default","limit":10,"columns":[{"dimension":"$f1","direction":"descending","dimensionOrder":"numeric"}]},"aggregations":[{"type":"doubleSum","name":"$f1","fieldName":"double1"}],"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"]} - druid.query.type groupBy - Statistics: Num rows: 6 Data size: 1104 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: username (type: string) - outputColumnNames: _col0 - Statistics: Num rows: 6 Data size: 1104 Basic stats: COMPLETE Column stats: NONE - Filter Operator - predicate: _col0 is not null (type: boolean) - Statistics: Num rows: 6 Data size: 1104 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: _col0 (type: string) - sort order: + - Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 6 Data size: 1104 Basic stats: COMPLETE Column stats: NONE - Execution mode: vectorized, llap - LLAP IO: no inputs - Map 5 - Map Operator Tree: - TableScan - alias: tbl1 - properties: - druid.fieldNames username,double1 - druid.fieldTypes string,double - druid.query.json {"queryType":"scan","dataSource":"default.druid_table_with_nulls","intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"filter":{"type":"not","field":{"type":"selector","dimension":"username","value":null}},"columns":["username","double1"],"resultFormat":"compactedList"} - druid.query.type scan - Statistics: Num rows: 6 Data size: 1152 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: username (type: string) - sort order: + - Map-reduce partition columns: username (type: string) - Statistics: Num rows: 6 Data size: 1152 Basic stats: COMPLETE Column stats: NONE - value expressions: double1 (type: double) - Execution mode: vectorized, llap - LLAP IO: no inputs - Reducer 2 - Execution mode: llap - Reduce Operator Tree: - Merge Join Operator - condition map: - Inner Join 0 to 1 - keys: - 0 username (type: string) - 1 _col0 (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 6 Data size: 1267 Basic stats: COMPLETE Column stats: NONE - Group By Operator - aggregations: sum(_col1) - keys: _col0 (type: string) - minReductionHashAggr: 0.99 - mode: hash - 
outputColumnNames: _col0, _col1 - Statistics: Num rows: 6 Data size: 1267 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: _col0 (type: string) - sort order: + - Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 6 Data size: 1267 Basic stats: COMPLETE Column stats: NONE - value expressions: _col1 (type: double) - Reducer 3 - Execution mode: vectorized, llap - Reduce Operator Tree: - Group By Operator - aggregations: sum(VALUE._col0) - keys: KEY._col0 (type: string) - mode: mergepartial - outputColumnNames: _col0, _col1 - Statistics: Num rows: 3 Data size: 633 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: _col1 (type: double) - sort order: + - Statistics: Num rows: 3 Data size: 633 Basic stats: COMPLETE Column stats: NONE - value expressions: _col0 (type: string) - Reducer 4 - Execution mode: vectorized, llap - Reduce Operator Tree: - Select Operator - expressions: VALUE._col0 (type: string), KEY.reducesinkkey0 (type: double) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 3 Data size: 633 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - Statistics: Num rows: 3 Data size: 633 Basic stats: COMPLETE Column stats: NONE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - ListSink - -PREHOOK: query: SELECT -username AS `username`, -SUM(double1) AS `sum_double1` -FROM -druid_table_with_nulls `tbl1` - JOIN ( - SELECT - username AS `username`, - SUM(double1) AS `sum_double2` - FROM druid_table_with_nulls - GROUP BY `username` - ORDER BY `sum_double2` - DESC LIMIT 10 - ) - `tbl2` - ON (`tbl1`.`username` = `tbl2`.`username`) -GROUP BY `tbl1`.`username` ORDER BY `sum_double1` -PREHOOK: type: QUERY -PREHOOK: Input: default@druid_table_with_nulls -PREHOOK: Output: hdfs://### HDFS PATH ### -POSTHOOK: query: SELECT -username AS `username`, -SUM(double1) AS `sum_double1` -FROM -druid_table_with_nulls `tbl1` - JOIN ( - SELECT - username AS `username`, - SUM(double1) AS `sum_double2` - FROM druid_table_with_nulls - GROUP BY `username` - ORDER BY `sum_double2` - DESC LIMIT 10 - ) - `tbl2` - ON (`tbl1`.`username` = `tbl2`.`username`) -GROUP BY `tbl1`.`username` ORDER BY `sum_double1` -POSTHOOK: type: QUERY -POSTHOOK: Input: default@druid_table_with_nulls -POSTHOOK: Output: hdfs://### HDFS PATH ### -bonnie 0.0 -calvin 0.0 -bob 3.14 -alfred 10.3 -charlie 25.6 diff --git ql/src/test/results/clientpositive/druid/druidmini_masking.q.out ql/src/test/results/clientpositive/druid/druidmini_masking.q.out deleted file mode 100644 index 0262e9b67c..0000000000 --- ql/src/test/results/clientpositive/druid/druidmini_masking.q.out +++ /dev/null @@ -1,80 +0,0 @@ -PREHOOK: query: CREATE EXTERNAL TABLE masking_test_druid -STORED BY 'org.apache.hadoop.hive.druid.DruidStorageHandler' -TBLPROPERTIES ("druid.segment.granularity" = "HOUR") -AS - SELECT cast(current_timestamp() AS timestamp with local time zone) AS `__time`, - cast(username AS string) AS username, - cast(double1 AS double) AS double1, - cast(key AS int) AS key - FROM TABLE ( - VALUES - ('alfred', 10.30, -2), - ('bob', 3.14, null), - ('bonnie', null, 100), - ('calvin', null, null), - ('charlie', 15.8, 20)) as q (username, double1, key) -PREHOOK: type: CREATETABLE_AS_SELECT -PREHOOK: Input: 
_dummy_database@_dummy_table -PREHOOK: Output: database:default -PREHOOK: Output: default@masking_test_druid -POSTHOOK: query: CREATE EXTERNAL TABLE masking_test_druid -STORED BY 'org.apache.hadoop.hive.druid.DruidStorageHandler' -TBLPROPERTIES ("druid.segment.granularity" = "HOUR") -AS - SELECT cast(current_timestamp() AS timestamp with local time zone) AS `__time`, - cast(username AS string) AS username, - cast(double1 AS double) AS double1, - cast(key AS int) AS key - FROM TABLE ( - VALUES - ('alfred', 10.30, -2), - ('bob', 3.14, null), - ('bonnie', null, 100), - ('calvin', null, null), - ('charlie', 15.8, 20)) as q (username, double1, key) -POSTHOOK: type: CREATETABLE_AS_SELECT -POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: database:default -POSTHOOK: Output: default@masking_test_druid -POSTHOOK: Lineage: masking_test_druid.__time SIMPLE [] -POSTHOOK: Lineage: masking_test_druid.double1 SCRIPT [] -POSTHOOK: Lineage: masking_test_druid.key SCRIPT [] -POSTHOOK: Lineage: masking_test_druid.username SCRIPT [] -PREHOOK: query: explain select username, key from masking_test_druid -PREHOOK: type: QUERY -PREHOOK: Input: default@masking_test_druid -PREHOOK: Output: hdfs://### HDFS PATH ### -POSTHOOK: query: explain select username, key from masking_test_druid -POSTHOOK: type: QUERY -POSTHOOK: Input: default@masking_test_druid -POSTHOOK: Output: hdfs://### HDFS PATH ### -STAGE DEPENDENCIES: - Stage-0 is a root stage - -STAGE PLANS: - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - TableScan - alias: masking_test_druid - properties: - druid.fieldNames username,key - druid.fieldTypes string,int - druid.query.json {"queryType":"scan","dataSource":"default.masking_test_druid","intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"filter":{"type":"bound","dimension":"key","lower":"10","lowerStrict":true,"ordering":"numeric"},"columns":["username","key"],"resultFormat":"compactedList"} - druid.query.type scan - Select Operator - expressions: username (type: string), key (type: int) - outputColumnNames: _col0, _col1 - ListSink - -PREHOOK: query: select username, key from masking_test_druid -PREHOOK: type: QUERY -PREHOOK: Input: default@masking_test_druid -PREHOOK: Output: hdfs://### HDFS PATH ### -POSTHOOK: query: select username, key from masking_test_druid -POSTHOOK: type: QUERY -POSTHOOK: Input: default@masking_test_druid -POSTHOOK: Output: hdfs://### HDFS PATH ### -bonnie 100 -charlie 20 diff --git ql/src/test/results/clientpositive/druid_basic1.q.out ql/src/test/results/clientpositive/druid_basic1.q.out deleted file mode 100644 index a5c0687620..0000000000 --- ql/src/test/results/clientpositive/druid_basic1.q.out +++ /dev/null @@ -1,140 +0,0 @@ -PREHOOK: query: CREATE EXTERNAL TABLE druid_table_1 -STORED BY 'org.apache.hadoop.hive.druid.QTestDruidStorageHandler' -TBLPROPERTIES ("druid.datasource" = "wikipedia") -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@druid_table_1 -POSTHOOK: query: CREATE EXTERNAL TABLE druid_table_1 -STORED BY 'org.apache.hadoop.hive.druid.QTestDruidStorageHandler' -TBLPROPERTIES ("druid.datasource" = "wikipedia") -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@druid_table_1 -PREHOOK: query: DESCRIBE FORMATTED druid_table_1 -PREHOOK: type: DESCTABLE -PREHOOK: Input: default@druid_table_1 -POSTHOOK: query: DESCRIBE FORMATTED druid_table_1 -POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@druid_table_1 -# col_name data_type comment -__time 
timestamp with local time zone from deserializer -robot string from deserializer -namespace string from deserializer -anonymous string from deserializer -unpatrolled string from deserializer -page string from deserializer -language string from deserializer -newpage string from deserializer -user string from deserializer -count float from deserializer -added float from deserializer -delta float from deserializer -variation float from deserializer -deleted float from deserializer - -# Detailed Table Information -Database: default -#### A masked pattern was here #### -Retention: 0 -#### A masked pattern was here #### -Table Type: EXTERNAL_TABLE -Table Parameters: - COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"__time\":\"true\",\"added\":\"true\",\"anonymous\":\"true\",\"count\":\"true\",\"deleted\":\"true\",\"delta\":\"true\",\"language\":\"true\",\"namespace\":\"true\",\"newpage\":\"true\",\"page\":\"true\",\"robot\":\"true\",\"unpatrolled\":\"true\",\"user\":\"true\",\"variation\":\"true\"}} - EXTERNAL TRUE - bucketing_version 2 - druid.datasource wikipedia - numFiles 0 - numRows 0 - rawDataSize 0 - storage_handler org.apache.hadoop.hive.druid.QTestDruidStorageHandler - totalSize 0 -#### A masked pattern was here #### - -# Storage Information -SerDe Library: org.apache.hadoop.hive.druid.QTestDruidSerDe -InputFormat: null -OutputFormat: null -Compressed: No -Num Buckets: -1 -Bucket Columns: [] -Sort Columns: [] -Storage Desc Params: - serialization.format 1 -PREHOOK: query: CREATE EXTERNAL TABLE druid_table_2 -STORED BY 'org.apache.hadoop.hive.druid.QTestDruidStorageHandler' -TBLPROPERTIES ("druid.datasource" = "wikipedia") -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@druid_table_2 -POSTHOOK: query: CREATE EXTERNAL TABLE druid_table_2 -STORED BY 'org.apache.hadoop.hive.druid.QTestDruidStorageHandler' -TBLPROPERTIES ("druid.datasource" = "wikipedia") -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@druid_table_2 -PREHOOK: query: DESCRIBE FORMATTED druid_table_2 -PREHOOK: type: DESCTABLE -PREHOOK: Input: default@druid_table_2 -POSTHOOK: query: DESCRIBE FORMATTED druid_table_2 -POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@druid_table_2 -# col_name data_type comment -__time timestamp with local time zone from deserializer -robot string from deserializer -namespace string from deserializer -anonymous string from deserializer -unpatrolled string from deserializer -page string from deserializer -language string from deserializer -newpage string from deserializer -user string from deserializer -count float from deserializer -added float from deserializer -delta float from deserializer -variation float from deserializer -deleted float from deserializer - -# Detailed Table Information -Database: default -#### A masked pattern was here #### -Retention: 0 -#### A masked pattern was here #### -Table Type: EXTERNAL_TABLE -Table Parameters: - COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"__time\":\"true\",\"added\":\"true\",\"anonymous\":\"true\",\"count\":\"true\",\"deleted\":\"true\",\"delta\":\"true\",\"language\":\"true\",\"namespace\":\"true\",\"newpage\":\"true\",\"page\":\"true\",\"robot\":\"true\",\"unpatrolled\":\"true\",\"user\":\"true\",\"variation\":\"true\"}} - EXTERNAL TRUE - bucketing_version 2 - druid.datasource wikipedia - numFiles 0 - numRows 0 - rawDataSize 0 - storage_handler org.apache.hadoop.hive.druid.QTestDruidStorageHandler - totalSize 0 -#### A 
masked pattern was here #### - -# Storage Information -SerDe Library: org.apache.hadoop.hive.druid.QTestDruidSerDe -InputFormat: null -OutputFormat: null -Compressed: No -Num Buckets: -1 -Bucket Columns: [] -Sort Columns: [] -Storage Desc Params: - serialization.format 1 -PREHOOK: query: DROP TABLE druid_table_2 -PREHOOK: type: DROPTABLE -PREHOOK: Input: default@druid_table_2 -PREHOOK: Output: default@druid_table_2 -POSTHOOK: query: DROP TABLE druid_table_2 -POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@druid_table_2 -POSTHOOK: Output: default@druid_table_2 -PREHOOK: query: DROP TABLE druid_table_1 -PREHOOK: type: DROPTABLE -PREHOOK: Input: default@druid_table_1 -PREHOOK: Output: default@druid_table_1 -POSTHOOK: query: DROP TABLE druid_table_1 -POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@druid_table_1 -POSTHOOK: Output: default@druid_table_1 diff --git ql/src/test/results/clientpositive/druid_basic3.q.out ql/src/test/results/clientpositive/druid_basic3.q.out deleted file mode 100644 index 54719f7517..0000000000 --- ql/src/test/results/clientpositive/druid_basic3.q.out +++ /dev/null @@ -1,476 +0,0 @@ -PREHOOK: query: CREATE EXTERNAL TABLE druid_table_1_n4 -STORED BY 'org.apache.hadoop.hive.druid.QTestDruidStorageHandler' -TBLPROPERTIES ("druid.datasource" = "wikipedia") -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@druid_table_1_n4 -POSTHOOK: query: CREATE EXTERNAL TABLE druid_table_1_n4 -STORED BY 'org.apache.hadoop.hive.druid.QTestDruidStorageHandler' -TBLPROPERTIES ("druid.datasource" = "wikipedia") -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@druid_table_1_n4 -PREHOOK: query: EXPLAIN -SELECT sum(added) + sum(delta) as a, language -FROM druid_table_1_n4 -GROUP BY language -ORDER BY a DESC -PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN -SELECT sum(added) + sum(delta) as a, language -FROM druid_table_1_n4 -GROUP BY language -ORDER BY a DESC -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-0 is a root stage - -STAGE PLANS: - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - TableScan - alias: druid_table_1_n4 - properties: - druid.fieldNames a,language - druid.fieldTypes double,string - druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"all","dimensions":[{"type":"default","dimension":"language","outputName":"language","outputType":"STRING"}],"limitSpec":{"type":"default","columns":[{"dimension":"a","direction":"descending","dimensionOrder":"numeric"}]},"aggregations":[{"type":"doubleSum","name":"$f1","fieldName":"added"},{"type":"doubleSum","name":"$f2","fieldName":"delta"}],"postAggregations":[{"type":"expression","name":"a","expression":"(\"$f1\" + \"$f2\")"}],"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"]} - druid.query.type groupBy - Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE - Select Operator - expressions: a (type: double), language (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE - ListSink - -PREHOOK: query: EXPLAIN -SELECT sum(delta), sum(added) + sum(delta) AS a, language -FROM druid_table_1_n4 -GROUP BY language -ORDER BY a DESC -PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN -SELECT sum(delta), sum(added) + sum(delta) AS a, language -FROM druid_table_1_n4 -GROUP BY language -ORDER BY a DESC -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-0 is a root stage - -STAGE PLANS: - Stage: Stage-0 - Fetch 
Operator - limit: -1 - Processor Tree: - TableScan - alias: druid_table_1_n4 - properties: - druid.fieldNames $f1,a,language - druid.fieldTypes double,double,string - druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"all","dimensions":[{"type":"default","dimension":"language","outputName":"language","outputType":"STRING"}],"limitSpec":{"type":"default","columns":[{"dimension":"a","direction":"descending","dimensionOrder":"numeric"}]},"aggregations":[{"type":"doubleSum","name":"$f1","fieldName":"delta"},{"type":"doubleSum","name":"$f2","fieldName":"added"}],"postAggregations":[{"type":"expression","name":"a","expression":"(\"$f2\" + \"$f1\")"}],"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"]} - druid.query.type groupBy - Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE - Select Operator - expressions: $f1 (type: double), a (type: double), language (type: string) - outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE - ListSink - -PREHOOK: query: EXPLAIN -SELECT language, sum(added) / sum(delta) AS a -FROM druid_table_1_n4 -GROUP BY language -ORDER BY a DESC -PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN -SELECT language, sum(added) / sum(delta) AS a -FROM druid_table_1_n4 -GROUP BY language -ORDER BY a DESC -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-0 is a root stage - -STAGE PLANS: - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - TableScan - alias: druid_table_1_n4 - properties: - druid.fieldNames language,a - druid.fieldTypes string,double - druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"all","dimensions":[{"type":"default","dimension":"language","outputName":"language","outputType":"STRING"}],"limitSpec":{"type":"default","columns":[{"dimension":"a","direction":"descending","dimensionOrder":"numeric"}]},"aggregations":[{"type":"doubleSum","name":"$f1","fieldName":"added"},{"type":"doubleSum","name":"$f2","fieldName":"delta"}],"postAggregations":[{"type":"expression","name":"a","expression":"(\"$f1\" / \"$f2\")"}],"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"]} - druid.query.type groupBy - Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE - Select Operator - expressions: language (type: string), a (type: double) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE - ListSink - -PREHOOK: query: EXPLAIN -SELECT language, sum(added) * sum(delta) AS a -FROM druid_table_1_n4 -GROUP BY language -ORDER BY a DESC -PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN -SELECT language, sum(added) * sum(delta) AS a -FROM druid_table_1_n4 -GROUP BY language -ORDER BY a DESC -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-0 is a root stage - -STAGE PLANS: - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - TableScan - alias: druid_table_1_n4 - properties: - druid.fieldNames language,a - druid.fieldTypes string,double - druid.query.json 
{"queryType":"groupBy","dataSource":"wikipedia","granularity":"all","dimensions":[{"type":"default","dimension":"language","outputName":"language","outputType":"STRING"}],"limitSpec":{"type":"default","columns":[{"dimension":"a","direction":"descending","dimensionOrder":"numeric"}]},"aggregations":[{"type":"doubleSum","name":"$f1","fieldName":"added"},{"type":"doubleSum","name":"$f2","fieldName":"delta"}],"postAggregations":[{"type":"expression","name":"a","expression":"(\"$f1\" * \"$f2\")"}],"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"]} - druid.query.type groupBy - Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE - Select Operator - expressions: language (type: string), a (type: double) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE - ListSink - -PREHOOK: query: EXPLAIN -SELECT language, sum(added) - sum(delta) AS a -FROM druid_table_1_n4 -GROUP BY language -ORDER BY a DESC -PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN -SELECT language, sum(added) - sum(delta) AS a -FROM druid_table_1_n4 -GROUP BY language -ORDER BY a DESC -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-0 is a root stage - -STAGE PLANS: - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - TableScan - alias: druid_table_1_n4 - properties: - druid.fieldNames language,a - druid.fieldTypes string,double - druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"all","dimensions":[{"type":"default","dimension":"language","outputName":"language","outputType":"STRING"}],"limitSpec":{"type":"default","columns":[{"dimension":"a","direction":"descending","dimensionOrder":"numeric"}]},"aggregations":[{"type":"doubleSum","name":"$f1","fieldName":"added"},{"type":"doubleSum","name":"$f2","fieldName":"delta"}],"postAggregations":[{"type":"expression","name":"a","expression":"(\"$f1\" - \"$f2\")"}],"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"]} - druid.query.type groupBy - Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE - Select Operator - expressions: language (type: string), a (type: double) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE - ListSink - -PREHOOK: query: EXPLAIN -SELECT language, sum(added) + 100 AS a -FROM druid_table_1_n4 -GROUP BY language -ORDER BY a DESC -PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN -SELECT language, sum(added) + 100 AS a -FROM druid_table_1_n4 -GROUP BY language -ORDER BY a DESC -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-0 is a root stage - -STAGE PLANS: - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - TableScan - alias: druid_table_1_n4 - properties: - druid.fieldNames language,a - druid.fieldTypes string,double - druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"all","dimensions":[{"type":"default","dimension":"language","outputName":"language","outputType":"STRING"}],"limitSpec":{"type":"default","columns":[{"dimension":"a","direction":"descending","dimensionOrder":"numeric"}]},"aggregations":[{"type":"doubleSum","name":"$f1","fieldName":"added"}],"postAggregations":[{"type":"expression","name":"a","expression":"(\"$f1\" + CAST(100, 'DOUBLE'))"}],"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"]} - druid.query.type groupBy - Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE - Select Operator - expressions: language (type: string), 
a (type: double) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE - ListSink - -PREHOOK: query: EXPLAIN -SELECT language, -1 * (a + b) AS c -FROM ( - SELECT (sum(added)-sum(delta)) / (count(*) * 3) AS a, sum(deleted) AS b, language - FROM druid_table_1_n4 - GROUP BY language) subq -ORDER BY c DESC -PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN -SELECT language, -1 * (a + b) AS c -FROM ( - SELECT (sum(added)-sum(delta)) / (count(*) * 3) AS a, sum(deleted) AS b, language - FROM druid_table_1_n4 - GROUP BY language) subq -ORDER BY c DESC -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-0 is a root stage - -STAGE PLANS: - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - TableScan - alias: druid_table_1_n4 - properties: - druid.fieldNames language,c - druid.fieldTypes string,double - druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"all","dimensions":[{"type":"default","dimension":"language","outputName":"language","outputType":"STRING"}],"limitSpec":{"type":"default","columns":[{"dimension":"c","direction":"descending","dimensionOrder":"numeric"}]},"aggregations":[{"type":"doubleSum","name":"$f1","fieldName":"added"},{"type":"doubleSum","name":"$f2","fieldName":"delta"},{"type":"count","name":"$f3"},{"type":"doubleSum","name":"$f4","fieldName":"deleted"}],"postAggregations":[{"type":"expression","name":"c","expression":"(-1.0 * (((\"$f1\" - \"$f2\") / CAST((\"$f3\" * 3), 'DOUBLE')) + \"$f4\"))"}],"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"]} - druid.query.type groupBy - Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE - Select Operator - expressions: language (type: string), c (type: double) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE - ListSink - -PREHOOK: query: EXPLAIN -SELECT language, robot, sum(added) - sum(delta) AS a -FROM druid_table_1_n4 -WHERE extract (week from `__time`) IN (10,11) -GROUP BY language, robot -PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN -SELECT language, robot, sum(added) - sum(delta) AS a -FROM druid_table_1_n4 -WHERE extract (week from `__time`) IN (10,11) -GROUP BY language, robot -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-0 is a root stage - -STAGE PLANS: - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - TableScan - alias: druid_table_1_n4 - properties: - druid.fieldNames language,robot,a - druid.fieldTypes string,string,double - druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"all","dimensions":[{"type":"default","dimension":"robot","outputName":"robot","outputType":"STRING"},{"type":"default","dimension":"language","outputName":"language","outputType":"STRING"}],"limitSpec":{"type":"default"},"filter":{"type":"in","dimension":"__time","values":["10","11"],"extractionFn":{"type":"timeFormat","format":"w","timeZone":"US/Pacific","locale":"en-US"}},"aggregations":[{"type":"doubleSum","name":"$f2","fieldName":"added"},{"type":"doubleSum","name":"$f3","fieldName":"delta"}],"postAggregations":[{"type":"expression","name":"a","expression":"(\"$f2\" - \"$f3\")"}],"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"]} - druid.query.type groupBy - Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE - Select Operator - expressions: language (type: string), robot (type: string), a (type: double) - outputColumnNames: _col0, _col1, _col2 - Statistics: 
Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE - ListSink - -PREHOOK: query: EXPLAIN -SELECT language, sum(delta) / count(*) AS a -FROM druid_table_1_n4 -GROUP BY language -ORDER BY a DESC -PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN -SELECT language, sum(delta) / count(*) AS a -FROM druid_table_1_n4 -GROUP BY language -ORDER BY a DESC -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-0 is a root stage - -STAGE PLANS: - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - TableScan - alias: druid_table_1_n4 - properties: - druid.fieldNames language,a - druid.fieldTypes string,double - druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"all","dimensions":[{"type":"default","dimension":"language","outputName":"language","outputType":"STRING"}],"limitSpec":{"type":"default","columns":[{"dimension":"a","direction":"descending","dimensionOrder":"numeric"}]},"aggregations":[{"type":"doubleSum","name":"$f1","fieldName":"delta"},{"type":"count","name":"$f2"}],"postAggregations":[{"type":"expression","name":"a","expression":"(\"$f1\" / CAST(\"$f2\", 'DOUBLE'))"}],"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"]} - druid.query.type groupBy - Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE - Select Operator - expressions: language (type: string), a (type: double) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE - ListSink - -PREHOOK: query: EXPLAIN -SELECT language, sum(added) / sum(delta) AS a, - CASE WHEN sum(deleted)=0 THEN 1.0 ELSE sum(deleted) END AS b -FROM druid_table_1_n4 -GROUP BY language -ORDER BY a DESC -PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN -SELECT language, sum(added) / sum(delta) AS a, - CASE WHEN sum(deleted)=0 THEN 1.0 ELSE sum(deleted) END AS b -FROM druid_table_1_n4 -GROUP BY language -ORDER BY a DESC -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-0 is a root stage - -STAGE PLANS: - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - TableScan - alias: druid_table_1_n4 - properties: - druid.fieldNames language,a,b - druid.fieldTypes string,double,double - druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"all","dimensions":[{"type":"default","dimension":"language","outputName":"language","outputType":"STRING"}],"limitSpec":{"type":"default","columns":[{"dimension":"a","direction":"descending","dimensionOrder":"numeric"}]},"aggregations":[{"type":"doubleSum","name":"$f1","fieldName":"added"},{"type":"doubleSum","name":"$f2","fieldName":"delta"},{"type":"doubleSum","name":"$f3","fieldName":"deleted"}],"postAggregations":[{"type":"expression","name":"a","expression":"(\"$f1\" / \"$f2\")"},{"type":"expression","name":"b","expression":"case_searched((\"$f3\" == 0.0),1,\"$f3\")"}],"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"]} - druid.query.type groupBy - Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE - Select Operator - expressions: language (type: string), a (type: double), b (type: double) - outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE - ListSink - -PREHOOK: query: EXPLAIN -SELECT language, a, a - b as c -FROM ( - SELECT language, sum(added) + 100 AS a, sum(delta) AS b - FROM druid_table_1_n4 - GROUP BY language) subq -ORDER BY a DESC -PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN -SELECT language, a, a - b as c -FROM ( - SELECT language, 
sum(added) + 100 AS a, sum(delta) AS b - FROM druid_table_1_n4 - GROUP BY language) subq -ORDER BY a DESC -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-0 is a root stage - -STAGE PLANS: - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - TableScan - alias: druid_table_1_n4 - properties: - druid.fieldNames language,a,c - druid.fieldTypes string,double,double - druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"all","dimensions":[{"type":"default","dimension":"language","outputName":"language","outputType":"STRING"}],"limitSpec":{"type":"default","columns":[{"dimension":"a","direction":"descending","dimensionOrder":"numeric"}]},"aggregations":[{"type":"doubleSum","name":"$f1","fieldName":"added"},{"type":"doubleSum","name":"$f2","fieldName":"delta"}],"postAggregations":[{"type":"expression","name":"a","expression":"(\"$f1\" + CAST(100, 'DOUBLE'))"},{"type":"expression","name":"c","expression":"((\"$f1\" + CAST(100, 'DOUBLE')) - \"$f2\")"}],"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"]} - druid.query.type groupBy - Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE - Select Operator - expressions: language (type: string), a (type: double), c (type: double) - outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE - ListSink - -PREHOOK: query: EXPLAIN -SELECT language, robot, "A" -FROM ( - SELECT sum(added) - sum(delta) AS a, language, robot - FROM druid_table_1_n4 - GROUP BY language, robot ) subq -ORDER BY "A" -LIMIT 5 -PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN -SELECT language, robot, "A" -FROM ( - SELECT sum(added) - sum(delta) AS a, language, robot - FROM druid_table_1_n4 - GROUP BY language, robot ) subq -ORDER BY "A" -LIMIT 5 -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-0 is a root stage - -STAGE PLANS: - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - TableScan - alias: druid_table_1_n4 - properties: - druid.fieldNames robot,language - druid.fieldTypes string,string - druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"all","dimensions":[{"type":"default","dimension":"robot","outputName":"robot","outputType":"STRING"},{"type":"default","dimension":"language","outputName":"language","outputType":"STRING"}],"limitSpec":{"type":"default","limit":5,"columns":[]},"aggregations":[],"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"]} - druid.query.type groupBy - Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE - Select Operator - expressions: language (type: string), robot (type: string), 'A' (type: string) - outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE - ListSink - -PREHOOK: query: EXPLAIN -SELECT language, robot, "A" -FROM ( - SELECT language, sum(added) + sum(delta) AS a, robot - FROM druid_table_1_n4 - GROUP BY language, robot) subq -ORDER BY robot, language -LIMIT 5 -PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN -SELECT language, robot, "A" -FROM ( - SELECT language, sum(added) + sum(delta) AS a, robot - FROM druid_table_1_n4 - GROUP BY language, robot) subq -ORDER BY robot, language -LIMIT 5 -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-0 is a root stage - -STAGE PLANS: - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - TableScan - alias: druid_table_1_n4 - properties: - druid.fieldNames robot,language - druid.fieldTypes string,string - 
druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"all","dimensions":[{"type":"default","dimension":"robot","outputName":"robot","outputType":"STRING"},{"type":"default","dimension":"language","outputName":"language","outputType":"STRING"}],"limitSpec":{"type":"default","limit":5,"columns":[{"dimension":"robot","direction":"ascending","dimensionOrder":"lexicographic"},{"dimension":"language","direction":"ascending","dimensionOrder":"lexicographic"}]},"aggregations":[],"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"]} - druid.query.type groupBy - Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE - Select Operator - expressions: language (type: string), robot (type: string), 'A' (type: string) - outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE - ListSink - diff --git ql/src/test/results/clientpositive/druid_intervals.q.out ql/src/test/results/clientpositive/druid_intervals.q.out deleted file mode 100644 index 715623ad61..0000000000 --- ql/src/test/results/clientpositive/druid_intervals.q.out +++ /dev/null @@ -1,386 +0,0 @@ -PREHOOK: query: CREATE EXTERNAL TABLE druid_table_1_n0 -STORED BY 'org.apache.hadoop.hive.druid.QTestDruidStorageHandler' -TBLPROPERTIES ("druid.datasource" = "wikipedia") -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@druid_table_1_n0 -POSTHOOK: query: CREATE EXTERNAL TABLE druid_table_1_n0 -STORED BY 'org.apache.hadoop.hive.druid.QTestDruidStorageHandler' -TBLPROPERTIES ("druid.datasource" = "wikipedia") -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@druid_table_1_n0 -PREHOOK: query: DESCRIBE FORMATTED druid_table_1_n0 -PREHOOK: type: DESCTABLE -PREHOOK: Input: default@druid_table_1_n0 -POSTHOOK: query: DESCRIBE FORMATTED druid_table_1_n0 -POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@druid_table_1_n0 -# col_name data_type comment -__time timestamp with local time zone from deserializer -robot string from deserializer -namespace string from deserializer -anonymous string from deserializer -unpatrolled string from deserializer -page string from deserializer -language string from deserializer -newpage string from deserializer -user string from deserializer -count float from deserializer -added float from deserializer -delta float from deserializer -variation float from deserializer -deleted float from deserializer - -# Detailed Table Information -Database: default -#### A masked pattern was here #### -Retention: 0 -#### A masked pattern was here #### -Table Type: EXTERNAL_TABLE -Table Parameters: - COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"__time\":\"true\",\"added\":\"true\",\"anonymous\":\"true\",\"count\":\"true\",\"deleted\":\"true\",\"delta\":\"true\",\"language\":\"true\",\"namespace\":\"true\",\"newpage\":\"true\",\"page\":\"true\",\"robot\":\"true\",\"unpatrolled\":\"true\",\"user\":\"true\",\"variation\":\"true\"}} - EXTERNAL TRUE - bucketing_version 2 - druid.datasource wikipedia - numFiles 0 - numRows 0 - rawDataSize 0 - storage_handler org.apache.hadoop.hive.druid.QTestDruidStorageHandler - totalSize 0 -#### A masked pattern was here #### - -# Storage Information -SerDe Library: org.apache.hadoop.hive.druid.QTestDruidSerDe -InputFormat: null -OutputFormat: null -Compressed: No -Num Buckets: -1 -Bucket Columns: [] -Sort Columns: [] -Storage Desc Params: - serialization.format 1 -PREHOOK: query: EXPLAIN 
-SELECT `__time` -FROM druid_table_1_n0 -PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN -SELECT `__time` -FROM druid_table_1_n0 -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-0 is a root stage - -STAGE PLANS: - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - TableScan - alias: druid_table_1_n0 - properties: - druid.fieldNames vc - druid.fieldTypes timestamp with local time zone - druid.query.json {"queryType":"scan","dataSource":"wikipedia","intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"virtualColumns":[{"type":"expression","name":"vc","expression":"\"__time\"","outputType":"LONG"}],"columns":["vc"],"resultFormat":"compactedList"} - druid.query.type scan - Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE - Select Operator - expressions: vc (type: timestamp with local time zone) - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE - ListSink - -PREHOOK: query: EXPLAIN -SELECT `__time` -FROM druid_table_1_n0 -WHERE `__time` < '2012-03-01 00:00:00' -PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN -SELECT `__time` -FROM druid_table_1_n0 -WHERE `__time` < '2012-03-01 00:00:00' -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-0 is a root stage - -STAGE PLANS: - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - TableScan - alias: druid_table_1_n0 - properties: - druid.fieldNames vc - druid.fieldTypes timestamp with local time zone - druid.query.json {"queryType":"scan","dataSource":"wikipedia","intervals":["1900-01-01T00:00:00.000Z/2012-03-01T08:00:00.000Z"],"virtualColumns":[{"type":"expression","name":"vc","expression":"\"__time\"","outputType":"LONG"}],"columns":["vc"],"resultFormat":"compactedList"} - druid.query.type scan - Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE - Select Operator - expressions: vc (type: timestamp with local time zone) - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE - ListSink - -PREHOOK: query: EXPLAIN -SELECT `__time` -FROM druid_table_1_n0 -WHERE `__time` >= '2010-01-01 00:00:00' AND `__time` <= '2012-03-01 00:00:00' -PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN -SELECT `__time` -FROM druid_table_1_n0 -WHERE `__time` >= '2010-01-01 00:00:00' AND `__time` <= '2012-03-01 00:00:00' -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-0 is a root stage - -STAGE PLANS: - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - TableScan - alias: druid_table_1_n0 - properties: - druid.fieldNames vc - druid.fieldTypes timestamp with local time zone - druid.query.json {"queryType":"scan","dataSource":"wikipedia","intervals":["2010-01-01T08:00:00.000Z/2012-03-01T08:00:00.001Z"],"virtualColumns":[{"type":"expression","name":"vc","expression":"\"__time\"","outputType":"LONG"}],"columns":["vc"],"resultFormat":"compactedList"} - druid.query.type scan - Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE - Select Operator - expressions: vc (type: timestamp with local time zone) - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE - ListSink - -PREHOOK: query: EXPLAIN -SELECT `__time` -FROM druid_table_1_n0 -WHERE `__time` >= '2010-01-01 00:00:00' AND `__time` <= '2012-03-01 00:00:00' - AND `__time` < '2011-01-01 00:00:00' -PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN -SELECT `__time` -FROM druid_table_1_n0 -WHERE `__time` >= '2010-01-01 00:00:00' AND `__time` <= 
'2012-03-01 00:00:00' - AND `__time` < '2011-01-01 00:00:00' -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-0 is a root stage - -STAGE PLANS: - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - TableScan - alias: druid_table_1_n0 - properties: - druid.fieldNames vc - druid.fieldTypes timestamp with local time zone - druid.query.json {"queryType":"scan","dataSource":"wikipedia","intervals":["2010-01-01T08:00:00.000Z/2011-01-01T08:00:00.000Z"],"virtualColumns":[{"type":"expression","name":"vc","expression":"\"__time\"","outputType":"LONG"}],"columns":["vc"],"resultFormat":"compactedList"} - druid.query.type scan - Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE - Select Operator - expressions: vc (type: timestamp with local time zone) - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE - ListSink - -PREHOOK: query: EXPLAIN -SELECT `__time` -FROM druid_table_1_n0 -WHERE `__time` BETWEEN '2010-01-01 00:00:00' AND '2011-01-01 00:00:00' -PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN -SELECT `__time` -FROM druid_table_1_n0 -WHERE `__time` BETWEEN '2010-01-01 00:00:00' AND '2011-01-01 00:00:00' -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-0 is a root stage - -STAGE PLANS: - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - TableScan - alias: druid_table_1_n0 - properties: - druid.fieldNames vc - druid.fieldTypes timestamp with local time zone - druid.query.json {"queryType":"scan","dataSource":"wikipedia","intervals":["2010-01-01T08:00:00.000Z/2011-01-01T08:00:00.001Z"],"virtualColumns":[{"type":"expression","name":"vc","expression":"\"__time\"","outputType":"LONG"}],"columns":["vc"],"resultFormat":"compactedList"} - druid.query.type scan - Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE - Select Operator - expressions: vc (type: timestamp with local time zone) - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE - ListSink - -PREHOOK: query: EXPLAIN -SELECT `__time` -FROM druid_table_1_n0 -WHERE (`__time` BETWEEN '2010-01-01 00:00:00' AND '2011-01-01 00:00:00') - OR (`__time` BETWEEN '2012-01-01 00:00:00' AND '2013-01-01 00:00:00') -PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN -SELECT `__time` -FROM druid_table_1_n0 -WHERE (`__time` BETWEEN '2010-01-01 00:00:00' AND '2011-01-01 00:00:00') - OR (`__time` BETWEEN '2012-01-01 00:00:00' AND '2013-01-01 00:00:00') -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-0 is a root stage - -STAGE PLANS: - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - TableScan - alias: druid_table_1_n0 - properties: - druid.fieldNames vc - druid.fieldTypes timestamp with local time zone - druid.query.json {"queryType":"scan","dataSource":"wikipedia","intervals":["2010-01-01T08:00:00.000Z/2011-01-01T08:00:00.001Z","2012-01-01T08:00:00.000Z/2013-01-01T08:00:00.001Z"],"virtualColumns":[{"type":"expression","name":"vc","expression":"\"__time\"","outputType":"LONG"}],"columns":["vc"],"resultFormat":"compactedList"} - druid.query.type scan - Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE - Select Operator - expressions: vc (type: timestamp with local time zone) - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE - ListSink - -PREHOOK: query: EXPLAIN -SELECT `__time` -FROM druid_table_1_n0 -WHERE (`__time` BETWEEN '2010-01-01 00:00:00' AND '2011-01-01 00:00:00') - OR 
(`__time` BETWEEN '2010-06-01 00:00:00' AND '2012-01-01 00:00:00') -PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN -SELECT `__time` -FROM druid_table_1_n0 -WHERE (`__time` BETWEEN '2010-01-01 00:00:00' AND '2011-01-01 00:00:00') - OR (`__time` BETWEEN '2010-06-01 00:00:00' AND '2012-01-01 00:00:00') -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-0 is a root stage - -STAGE PLANS: - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - TableScan - alias: druid_table_1_n0 - properties: - druid.fieldNames vc - druid.fieldTypes timestamp with local time zone - druid.query.json {"queryType":"scan","dataSource":"wikipedia","intervals":["2010-01-01T08:00:00.000Z/2012-01-01T08:00:00.001Z"],"virtualColumns":[{"type":"expression","name":"vc","expression":"\"__time\"","outputType":"LONG"}],"columns":["vc"],"resultFormat":"compactedList"} - druid.query.type scan - Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE - Select Operator - expressions: vc (type: timestamp with local time zone) - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE - ListSink - -PREHOOK: query: EXPLAIN -SELECT `__time` -FROM druid_table_1_n0 -WHERE `__time` IN ('2010-01-01 00:00:00','2011-01-01 00:00:00') -PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN -SELECT `__time` -FROM druid_table_1_n0 -WHERE `__time` IN ('2010-01-01 00:00:00','2011-01-01 00:00:00') -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-0 is a root stage - -STAGE PLANS: - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - TableScan - alias: druid_table_1_n0 - properties: - druid.fieldNames vc - druid.fieldTypes timestamp with local time zone - druid.query.json {"queryType":"scan","dataSource":"wikipedia","intervals":["2010-01-01T08:00:00.000Z/2010-01-01T08:00:00.001Z","2011-01-01T08:00:00.000Z/2011-01-01T08:00:00.001Z"],"virtualColumns":[{"type":"expression","name":"vc","expression":"\"__time\"","outputType":"LONG"}],"columns":["vc"],"resultFormat":"compactedList"} - druid.query.type scan - Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE - Select Operator - expressions: vc (type: timestamp with local time zone) - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE - ListSink - -PREHOOK: query: EXPLAIN -SELECT `__time`, robot -FROM druid_table_1_n0 -WHERE robot = 'user1' AND `__time` IN ('2010-01-01 00:00:00','2011-01-01 00:00:00') -PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN -SELECT `__time`, robot -FROM druid_table_1_n0 -WHERE robot = 'user1' AND `__time` IN ('2010-01-01 00:00:00','2011-01-01 00:00:00') -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-0 is a root stage - -STAGE PLANS: - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - TableScan - alias: druid_table_1_n0 - properties: - druid.fieldNames vc,vc0 - druid.fieldTypes timestamp with local time zone,string - druid.query.json {"queryType":"scan","dataSource":"wikipedia","intervals":["2010-01-01T08:00:00.000Z/2010-01-01T08:00:00.001Z","2011-01-01T08:00:00.000Z/2011-01-01T08:00:00.001Z"],"filter":{"type":"selector","dimension":"robot","value":"user1"},"virtualColumns":[{"type":"expression","name":"vc","expression":"\"__time\"","outputType":"LONG"},{"type":"expression","name":"vc0","expression":"'user1'","outputType":"STRING"}],"columns":["vc","vc0"],"resultFormat":"compactedList"} - druid.query.type scan - Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE - Select 
Operator - expressions: vc (type: timestamp with local time zone), vc0 (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE - ListSink - -PREHOOK: query: EXPLAIN -SELECT `__time`, robot -FROM druid_table_1_n0 -WHERE robot = 'user1' OR `__time` IN ('2010-01-01 00:00:00','2011-01-01 00:00:00') -PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN -SELECT `__time`, robot -FROM druid_table_1_n0 -WHERE robot = 'user1' OR `__time` IN ('2010-01-01 00:00:00','2011-01-01 00:00:00') -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-0 is a root stage - -STAGE PLANS: - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - TableScan - alias: druid_table_1_n0 - properties: - druid.fieldNames vc,robot - druid.fieldTypes timestamp with local time zone,string - druid.query.json {"queryType":"scan","dataSource":"wikipedia","intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"filter":{"type":"or","fields":[{"type":"selector","dimension":"robot","value":"user1"},{"type":"selector","dimension":"__time","value":"2010-01-01T08:00:00.000Z","extractionFn":{"type":"timeFormat","format":"yyyy-MM-dd'T'HH:mm:ss.SSS'Z'","timeZone":"UTC"}},{"type":"selector","dimension":"__time","value":"2011-01-01T08:00:00.000Z","extractionFn":{"type":"timeFormat","format":"yyyy-MM-dd'T'HH:mm:ss.SSS'Z'","timeZone":"UTC"}}]},"virtualColumns":[{"type":"expression","name":"vc","expression":"\"__time\"","outputType":"LONG"}],"columns":["vc","robot"],"resultFormat":"compactedList"} - druid.query.type scan - Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE - Select Operator - expressions: vc (type: timestamp with local time zone), robot (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE - ListSink - diff --git ql/src/test/results/clientpositive/fouter_join_ppr.q.out ql/src/test/results/clientpositive/fouter_join_ppr.q.out deleted file mode 100644 index f41f121cf1..0000000000 --- ql/src/test/results/clientpositive/fouter_join_ppr.q.out +++ /dev/null @@ -1,1200 +0,0 @@ -PREHOOK: query: EXPLAIN EXTENDED - FROM - src a - FULL OUTER JOIN - srcpart b - ON (a.key = b.key AND b.ds = '2008-04-08') - SELECT a.key, a.value, b.key, b.value - WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25 -PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN EXTENDED - FROM - src a - FULL OUTER JOIN - srcpart b - ON (a.key = b.key AND b.ds = '2008-04-08') - SELECT a.key, a.value, b.key, b.value - WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25 -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: a - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - GatherStats: false - Filter Operator - isSamplingPred: false - predicate: ((UDFToDouble(key) < 20.0D) and (UDFToDouble(key) > 15.0D)) (type: boolean) - Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: key (type: string), value (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: _col0 (type: string) - null sort order: a - sort order: + - Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE 
Column stats: NONE - tag: 0 - value expressions: _col1 (type: string) - auto parallelism: false - TableScan - alias: b - Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE - GatherStats: false - Filter Operator - isSamplingPred: false - predicate: ((UDFToDouble(key) < 20.0D) and (UDFToDouble(key) > 15.0D)) (type: boolean) - Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: key (type: string), value (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: _col0 (type: string) - null sort order: a - sort order: + - Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE - tag: 1 - value expressions: _col1 (type: string) - auto parallelism: false - Path -> Alias: -#### A masked pattern was here #### - Path -> Partition: -#### A masked pattern was here #### - Partition - base file name: src - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}} - bucket_count -1 - bucketing_version 2 - column.name.delimiter , - columns key,value - columns.comments 'default','default' - columns.types string:string -#### A masked pattern was here #### - name default.src - numFiles 1 - numRows 500 - rawDataSize 5312 - serialization.ddl struct src { string key, string value} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 5812 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}} - bucket_count -1 - bucketing_version 2 - column.name.delimiter , - columns key,value - columns.comments 'default','default' - columns.types string:string -#### A masked pattern was here #### - name default.src - numFiles 1 - numRows 500 - rawDataSize 5312 - serialization.ddl struct src { string key, string value} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 5812 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.src - name: default.src -#### A masked pattern was here #### - Partition - base file name: hr=11 - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - partition values: - ds 2008-04-08 - hr 11 - properties: - COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}} - bucket_count -1 - column.name.delimiter , - columns key,value - columns.comments 'default','default' - columns.types string:string -#### A masked pattern was here #### - name default.srcpart - numFiles 1 - numRows 500 - partition_columns ds/hr - partition_columns.types string:string - rawDataSize 5312 - serialization.ddl struct srcpart { string key, string value} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 5812 -#### A masked pattern was here #### - serde: 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - bucket_count -1 - bucketing_version 2 - column.name.delimiter , - columns key,value - columns.comments 'default','default' - columns.types string:string -#### A masked pattern was here #### - name default.srcpart - partition_columns ds/hr - partition_columns.types string:string - serialization.ddl struct srcpart { string key, string value} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.srcpart - name: default.srcpart -#### A masked pattern was here #### - Partition - base file name: hr=12 - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - partition values: - ds 2008-04-08 - hr 12 - properties: - COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}} - bucket_count -1 - column.name.delimiter , - columns key,value - columns.comments 'default','default' - columns.types string:string -#### A masked pattern was here #### - name default.srcpart - numFiles 1 - numRows 500 - partition_columns ds/hr - partition_columns.types string:string - rawDataSize 5312 - serialization.ddl struct srcpart { string key, string value} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 5812 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - bucket_count -1 - bucketing_version 2 - column.name.delimiter , - columns key,value - columns.comments 'default','default' - columns.types string:string -#### A masked pattern was here #### - name default.srcpart - partition_columns ds/hr - partition_columns.types string:string - serialization.ddl struct srcpart { string key, string value} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.srcpart - name: default.srcpart - Truncated Path -> Alias: - /src [$hdt$_0:a] - /srcpart/ds=2008-04-08/hr=11 [$hdt$_1:b] - /srcpart/ds=2008-04-08/hr=12 [$hdt$_1:b] - Needs Tagging: true - Reduce Operator Tree: - Join Operator - condition map: - Inner Join 0 to 1 - keys: - 0 _col0 (type: string) - 1 _col0 (type: string) - outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 122 Data size: 1296 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - GlobalTableId: 0 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - Statistics: Num rows: 122 Data size: 1296 Basic stats: COMPLETE Column stats: NONE -#### A masked pattern was here #### - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - properties: - columns _col0,_col1,_col2,_col3 - columns.types string:string:string:string - escape.delim \ - hive.serialization.extend.additional.nesting.levels true - serialization.escape.crlf true - serialization.format 1 - serialization.lib 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - TotalFiles: 1 - GatherStats: false - MultiFileSpray: false - - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - ListSink - -PREHOOK: query: FROM - src a - FULL OUTER JOIN - srcpart b - ON (a.key = b.key AND b.ds = '2008-04-08') - SELECT a.key, a.value, b.key, b.value - WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25 -PREHOOK: type: QUERY -PREHOOK: Input: default@src -PREHOOK: Input: default@srcpart -PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 -PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 -#### A masked pattern was here #### -POSTHOOK: query: FROM - src a - FULL OUTER JOIN - srcpart b - ON (a.key = b.key AND b.ds = '2008-04-08') - SELECT a.key, a.value, b.key, b.value - WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src -POSTHOOK: Input: default@srcpart -POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 -POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 -#### A masked pattern was here #### -17 val_17 17 val_17 -17 val_17 17 val_17 -18 val_18 18 val_18 -18 val_18 18 val_18 -18 val_18 18 val_18 -18 val_18 18 val_18 -18 val_18 18 val_18 -18 val_18 18 val_18 -18 val_18 18 val_18 -18 val_18 18 val_18 -19 val_19 19 val_19 -19 val_19 19 val_19 -PREHOOK: query: EXPLAIN EXTENDED - FROM - srcpart a - FULL OUTER JOIN - src b - ON (a.key = b.key AND a.ds = '2008-04-08') - SELECT a.key, a.value, b.key, b.value - WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25 -PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN EXTENDED - FROM - srcpart a - FULL OUTER JOIN - src b - ON (a.key = b.key AND a.ds = '2008-04-08') - SELECT a.key, a.value, b.key, b.value - WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25 -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: a - Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE - GatherStats: false - Filter Operator - isSamplingPred: false - predicate: ((UDFToDouble(key) < 20.0D) and (UDFToDouble(key) > 15.0D)) (type: boolean) - Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: key (type: string), value (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: _col0 (type: string) - null sort order: a - sort order: + - Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE - tag: 0 - value expressions: _col1 (type: string) - auto parallelism: false - TableScan - alias: b - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - GatherStats: false - Filter Operator - isSamplingPred: false - predicate: ((UDFToDouble(key) < 20.0D) and (UDFToDouble(key) > 15.0D)) (type: boolean) - Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: key (type: string), value (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: _col0 (type: string) - null sort order: a - sort order: + - Map-reduce partition 
columns: _col0 (type: string) - Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE - tag: 1 - value expressions: _col1 (type: string) - auto parallelism: false - Path -> Alias: -#### A masked pattern was here #### - Path -> Partition: -#### A masked pattern was here #### - Partition - base file name: src - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}} - bucket_count -1 - bucketing_version 2 - column.name.delimiter , - columns key,value - columns.comments 'default','default' - columns.types string:string -#### A masked pattern was here #### - name default.src - numFiles 1 - numRows 500 - rawDataSize 5312 - serialization.ddl struct src { string key, string value} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 5812 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}} - bucket_count -1 - bucketing_version 2 - column.name.delimiter , - columns key,value - columns.comments 'default','default' - columns.types string:string -#### A masked pattern was here #### - name default.src - numFiles 1 - numRows 500 - rawDataSize 5312 - serialization.ddl struct src { string key, string value} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 5812 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.src - name: default.src -#### A masked pattern was here #### - Partition - base file name: hr=11 - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - partition values: - ds 2008-04-08 - hr 11 - properties: - COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}} - bucket_count -1 - column.name.delimiter , - columns key,value - columns.comments 'default','default' - columns.types string:string -#### A masked pattern was here #### - name default.srcpart - numFiles 1 - numRows 500 - partition_columns ds/hr - partition_columns.types string:string - rawDataSize 5312 - serialization.ddl struct srcpart { string key, string value} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 5812 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - bucket_count -1 - bucketing_version 2 - column.name.delimiter , - columns key,value - columns.comments 'default','default' - columns.types string:string -#### A masked pattern was here #### - name default.srcpart - partition_columns ds/hr - partition_columns.types string:string - serialization.ddl struct srcpart { string key, string value} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.srcpart - 
name: default.srcpart -#### A masked pattern was here #### - Partition - base file name: hr=12 - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - partition values: - ds 2008-04-08 - hr 12 - properties: - COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}} - bucket_count -1 - column.name.delimiter , - columns key,value - columns.comments 'default','default' - columns.types string:string -#### A masked pattern was here #### - name default.srcpart - numFiles 1 - numRows 500 - partition_columns ds/hr - partition_columns.types string:string - rawDataSize 5312 - serialization.ddl struct srcpart { string key, string value} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 5812 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - bucket_count -1 - bucketing_version 2 - column.name.delimiter , - columns key,value - columns.comments 'default','default' - columns.types string:string -#### A masked pattern was here #### - name default.srcpart - partition_columns ds/hr - partition_columns.types string:string - serialization.ddl struct srcpart { string key, string value} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.srcpart - name: default.srcpart - Truncated Path -> Alias: - /src [$hdt$_1:b] - /srcpart/ds=2008-04-08/hr=11 [$hdt$_0:a] - /srcpart/ds=2008-04-08/hr=12 [$hdt$_0:a] - Needs Tagging: true - Reduce Operator Tree: - Join Operator - condition map: - Inner Join 0 to 1 - keys: - 0 _col0 (type: string) - 1 _col0 (type: string) - outputColumnNames: _col0, _col1, _col3, _col4 - Statistics: Num rows: 122 Data size: 1296 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col3 (type: string), _col4 (type: string) - outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 122 Data size: 1296 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - GlobalTableId: 0 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - Statistics: Num rows: 122 Data size: 1296 Basic stats: COMPLETE Column stats: NONE -#### A masked pattern was here #### - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - properties: - columns _col0,_col1,_col2,_col3 - columns.types string:string:string:string - escape.delim \ - hive.serialization.extend.additional.nesting.levels true - serialization.escape.crlf true - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - TotalFiles: 1 - GatherStats: false - MultiFileSpray: false - - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - ListSink - -PREHOOK: query: FROM - srcpart a - FULL OUTER JOIN - src b - ON (a.key = b.key AND a.ds = '2008-04-08') - SELECT a.key, a.value, b.key, b.value - WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25 -PREHOOK: type: QUERY -PREHOOK: Input: default@src -PREHOOK: Input: default@srcpart 
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 -PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 -#### A masked pattern was here #### -POSTHOOK: query: FROM - srcpart a - FULL OUTER JOIN - src b - ON (a.key = b.key AND a.ds = '2008-04-08') - SELECT a.key, a.value, b.key, b.value - WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src -POSTHOOK: Input: default@srcpart -POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 -POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 -#### A masked pattern was here #### -17 val_17 17 val_17 -17 val_17 17 val_17 -18 val_18 18 val_18 -18 val_18 18 val_18 -18 val_18 18 val_18 -18 val_18 18 val_18 -18 val_18 18 val_18 -18 val_18 18 val_18 -18 val_18 18 val_18 -18 val_18 18 val_18 -19 val_19 19 val_19 -19 val_19 19 val_19 -PREHOOK: query: EXPLAIN EXTENDED - FROM - src a - FULL OUTER JOIN - srcpart b - ON (a.key = b.key) - SELECT a.key, a.value, b.key, b.value - WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25 AND b.ds = '2008-04-08' -PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN EXTENDED - FROM - src a - FULL OUTER JOIN - srcpart b - ON (a.key = b.key) - SELECT a.key, a.value, b.key, b.value - WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25 AND b.ds = '2008-04-08' -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: a - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - GatherStats: false - Filter Operator - isSamplingPred: false - predicate: ((UDFToDouble(key) < 20.0D) and (UDFToDouble(key) > 15.0D)) (type: boolean) - Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: key (type: string), value (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: _col0 (type: string) - null sort order: a - sort order: + - Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE - tag: 0 - value expressions: _col1 (type: string) - auto parallelism: false - TableScan - alias: b - Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE - GatherStats: false - Filter Operator - isSamplingPred: false - predicate: ((UDFToDouble(key) < 20.0D) and (UDFToDouble(key) > 15.0D)) (type: boolean) - Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: key (type: string), value (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: _col0 (type: string) - null sort order: a - sort order: + - Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE - tag: 1 - value expressions: _col1 (type: string) - auto parallelism: false - Path -> Alias: -#### A masked pattern was here #### - Path -> Partition: -#### A masked pattern was here #### - Partition - base file name: src - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE 
{"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}} - bucket_count -1 - bucketing_version 2 - column.name.delimiter , - columns key,value - columns.comments 'default','default' - columns.types string:string -#### A masked pattern was here #### - name default.src - numFiles 1 - numRows 500 - rawDataSize 5312 - serialization.ddl struct src { string key, string value} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 5812 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}} - bucket_count -1 - bucketing_version 2 - column.name.delimiter , - columns key,value - columns.comments 'default','default' - columns.types string:string -#### A masked pattern was here #### - name default.src - numFiles 1 - numRows 500 - rawDataSize 5312 - serialization.ddl struct src { string key, string value} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 5812 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.src - name: default.src -#### A masked pattern was here #### - Partition - base file name: hr=11 - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - partition values: - ds 2008-04-08 - hr 11 - properties: - COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}} - bucket_count -1 - column.name.delimiter , - columns key,value - columns.comments 'default','default' - columns.types string:string -#### A masked pattern was here #### - name default.srcpart - numFiles 1 - numRows 500 - partition_columns ds/hr - partition_columns.types string:string - rawDataSize 5312 - serialization.ddl struct srcpart { string key, string value} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 5812 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - bucket_count -1 - bucketing_version 2 - column.name.delimiter , - columns key,value - columns.comments 'default','default' - columns.types string:string -#### A masked pattern was here #### - name default.srcpart - partition_columns ds/hr - partition_columns.types string:string - serialization.ddl struct srcpart { string key, string value} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.srcpart - name: default.srcpart -#### A masked pattern was here #### - Partition - base file name: hr=12 - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - partition values: - ds 2008-04-08 - hr 12 - properties: - COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}} - bucket_count -1 - column.name.delimiter , - columns key,value - columns.comments 'default','default' - columns.types 
string:string -#### A masked pattern was here #### - name default.srcpart - numFiles 1 - numRows 500 - partition_columns ds/hr - partition_columns.types string:string - rawDataSize 5312 - serialization.ddl struct srcpart { string key, string value} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 5812 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - bucket_count -1 - bucketing_version 2 - column.name.delimiter , - columns key,value - columns.comments 'default','default' - columns.types string:string -#### A masked pattern was here #### - name default.srcpart - partition_columns ds/hr - partition_columns.types string:string - serialization.ddl struct srcpart { string key, string value} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.srcpart - name: default.srcpart - Truncated Path -> Alias: - /src [$hdt$_0:a] - /srcpart/ds=2008-04-08/hr=11 [$hdt$_1:b] - /srcpart/ds=2008-04-08/hr=12 [$hdt$_1:b] - Needs Tagging: true - Reduce Operator Tree: - Join Operator - condition map: - Inner Join 0 to 1 - keys: - 0 _col0 (type: string) - 1 _col0 (type: string) - outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 122 Data size: 1296 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - GlobalTableId: 0 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - Statistics: Num rows: 122 Data size: 1296 Basic stats: COMPLETE Column stats: NONE -#### A masked pattern was here #### - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - properties: - columns _col0,_col1,_col2,_col3 - columns.types string:string:string:string - escape.delim \ - hive.serialization.extend.additional.nesting.levels true - serialization.escape.crlf true - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - TotalFiles: 1 - GatherStats: false - MultiFileSpray: false - - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - ListSink - -PREHOOK: query: FROM - src a - FULL OUTER JOIN - srcpart b - ON (a.key = b.key) - SELECT a.key, a.value, b.key, b.value - WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25 AND b.ds = '2008-04-08' -PREHOOK: type: QUERY -PREHOOK: Input: default@src -PREHOOK: Input: default@srcpart -PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 -PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 -#### A masked pattern was here #### -POSTHOOK: query: FROM - src a - FULL OUTER JOIN - srcpart b - ON (a.key = b.key) - SELECT a.key, a.value, b.key, b.value - WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25 AND b.ds = '2008-04-08' -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src -POSTHOOK: Input: default@srcpart -POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 -POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 -#### A masked pattern was here #### -17 val_17 17 val_17 -17 val_17 17 val_17 -18 val_18 18 val_18 -18 val_18 18 val_18 -18 val_18 18 val_18 -18 val_18 18 val_18 -18 val_18 18 val_18 -18 val_18 18 val_18 
-18 val_18 18 val_18 -18 val_18 18 val_18 -19 val_19 19 val_19 -19 val_19 19 val_19 -PREHOOK: query: EXPLAIN EXTENDED - FROM - srcpart a - FULL OUTER JOIN - src b - ON (a.key = b.key) - SELECT a.key, a.value, b.key, b.value - WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25 AND a.ds = '2008-04-08' -PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN EXTENDED - FROM - srcpart a - FULL OUTER JOIN - src b - ON (a.key = b.key) - SELECT a.key, a.value, b.key, b.value - WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25 AND a.ds = '2008-04-08' -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: a - Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE - GatherStats: false - Filter Operator - isSamplingPred: false - predicate: ((UDFToDouble(key) < 20.0D) and (UDFToDouble(key) > 15.0D)) (type: boolean) - Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: key (type: string), value (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: _col0 (type: string) - null sort order: a - sort order: + - Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE - tag: 0 - value expressions: _col1 (type: string) - auto parallelism: false - TableScan - alias: b - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - GatherStats: false - Filter Operator - isSamplingPred: false - predicate: ((UDFToDouble(key) < 20.0D) and (UDFToDouble(key) > 15.0D)) (type: boolean) - Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: key (type: string), value (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: _col0 (type: string) - null sort order: a - sort order: + - Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE - tag: 1 - value expressions: _col1 (type: string) - auto parallelism: false - Path -> Alias: -#### A masked pattern was here #### - Path -> Partition: -#### A masked pattern was here #### - Partition - base file name: src - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}} - bucket_count -1 - bucketing_version 2 - column.name.delimiter , - columns key,value - columns.comments 'default','default' - columns.types string:string -#### A masked pattern was here #### - name default.src - numFiles 1 - numRows 500 - rawDataSize 5312 - serialization.ddl struct src { string key, string value} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 5812 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE 
{"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}} - bucket_count -1 - bucketing_version 2 - column.name.delimiter , - columns key,value - columns.comments 'default','default' - columns.types string:string -#### A masked pattern was here #### - name default.src - numFiles 1 - numRows 500 - rawDataSize 5312 - serialization.ddl struct src { string key, string value} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 5812 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.src - name: default.src -#### A masked pattern was here #### - Partition - base file name: hr=11 - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - partition values: - ds 2008-04-08 - hr 11 - properties: - COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}} - bucket_count -1 - column.name.delimiter , - columns key,value - columns.comments 'default','default' - columns.types string:string -#### A masked pattern was here #### - name default.srcpart - numFiles 1 - numRows 500 - partition_columns ds/hr - partition_columns.types string:string - rawDataSize 5312 - serialization.ddl struct srcpart { string key, string value} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 5812 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - bucket_count -1 - bucketing_version 2 - column.name.delimiter , - columns key,value - columns.comments 'default','default' - columns.types string:string -#### A masked pattern was here #### - name default.srcpart - partition_columns ds/hr - partition_columns.types string:string - serialization.ddl struct srcpart { string key, string value} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.srcpart - name: default.srcpart -#### A masked pattern was here #### - Partition - base file name: hr=12 - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - partition values: - ds 2008-04-08 - hr 12 - properties: - COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}} - bucket_count -1 - column.name.delimiter , - columns key,value - columns.comments 'default','default' - columns.types string:string -#### A masked pattern was here #### - name default.srcpart - numFiles 1 - numRows 500 - partition_columns ds/hr - partition_columns.types string:string - rawDataSize 5312 - serialization.ddl struct srcpart { string key, string value} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 5812 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - bucket_count -1 - bucketing_version 2 - column.name.delimiter , - columns key,value - columns.comments 'default','default' - columns.types string:string -#### A masked 
pattern was here #### - name default.srcpart - partition_columns ds/hr - partition_columns.types string:string - serialization.ddl struct srcpart { string key, string value} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.srcpart - name: default.srcpart - Truncated Path -> Alias: - /src [$hdt$_1:b] - /srcpart/ds=2008-04-08/hr=11 [$hdt$_0:a] - /srcpart/ds=2008-04-08/hr=12 [$hdt$_0:a] - Needs Tagging: true - Reduce Operator Tree: - Join Operator - condition map: - Inner Join 0 to 1 - keys: - 0 _col0 (type: string) - 1 _col0 (type: string) - outputColumnNames: _col0, _col1, _col3, _col4 - Statistics: Num rows: 122 Data size: 1296 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col3 (type: string), _col4 (type: string) - outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 122 Data size: 1296 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - GlobalTableId: 0 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - Statistics: Num rows: 122 Data size: 1296 Basic stats: COMPLETE Column stats: NONE -#### A masked pattern was here #### - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - properties: - columns _col0,_col1,_col2,_col3 - columns.types string:string:string:string - escape.delim \ - hive.serialization.extend.additional.nesting.levels true - serialization.escape.crlf true - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - TotalFiles: 1 - GatherStats: false - MultiFileSpray: false - - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - ListSink - -PREHOOK: query: FROM - srcpart a - FULL OUTER JOIN - src b - ON (a.key = b.key) - SELECT a.key, a.value, b.key, b.value - WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25 AND a.ds = '2008-04-08' -PREHOOK: type: QUERY -PREHOOK: Input: default@src -PREHOOK: Input: default@srcpart -PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 -PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 -#### A masked pattern was here #### -POSTHOOK: query: FROM - srcpart a - FULL OUTER JOIN - src b - ON (a.key = b.key) - SELECT a.key, a.value, b.key, b.value - WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25 AND a.ds = '2008-04-08' -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src -POSTHOOK: Input: default@srcpart -POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 -POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 -#### A masked pattern was here #### -17 val_17 17 val_17 -17 val_17 17 val_17 -18 val_18 18 val_18 -18 val_18 18 val_18 -18 val_18 18 val_18 -18 val_18 18 val_18 -18 val_18 18 val_18 -18 val_18 18 val_18 -18 val_18 18 val_18 -18 val_18 18 val_18 -19 val_19 19 val_19 -19 val_19 19 val_19 diff --git ql/src/test/results/clientpositive/fullouter_mapjoin_1.q.out ql/src/test/results/clientpositive/fullouter_mapjoin_1.q.out deleted file mode 100644 index 0ae9df92f2..0000000000 --- ql/src/test/results/clientpositive/fullouter_mapjoin_1.q.out +++ /dev/null @@ -1,176 +0,0 @@ -PREHOOK: query: CREATE TABLE fullouter_long_big_1a(key bigint) -row format delimited fields terminated by ',' -PREHOOK: type: CREATETABLE -PREHOOK: Output: 
database:default -PREHOOK: Output: default@fullouter_long_big_1a -POSTHOOK: query: CREATE TABLE fullouter_long_big_1a(key bigint) -row format delimited fields terminated by ',' -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@fullouter_long_big_1a -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/fullouter_long_big_1a.txt' OVERWRITE INTO TABLE fullouter_long_big_1a -PREHOOK: type: LOAD -#### A masked pattern was here #### -PREHOOK: Output: default@fullouter_long_big_1a -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/fullouter_long_big_1a.txt' OVERWRITE INTO TABLE fullouter_long_big_1a -POSTHOOK: type: LOAD -#### A masked pattern was here #### -POSTHOOK: Output: default@fullouter_long_big_1a -PREHOOK: query: CREATE TABLE fullouter_long_small_1a(key bigint, s_date date) -row format delimited fields terminated by ',' -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@fullouter_long_small_1a -POSTHOOK: query: CREATE TABLE fullouter_long_small_1a(key bigint, s_date date) -row format delimited fields terminated by ',' -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@fullouter_long_small_1a -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/fullouter_long_small_1a.txt' OVERWRITE INTO TABLE fullouter_long_small_1a -PREHOOK: type: LOAD -#### A masked pattern was here #### -PREHOOK: Output: default@fullouter_long_small_1a -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/fullouter_long_small_1a.txt' OVERWRITE INTO TABLE fullouter_long_small_1a -POSTHOOK: type: LOAD -#### A masked pattern was here #### -POSTHOOK: Output: default@fullouter_long_small_1a -PREHOOK: query: EXPLAIN VECTORIZATION DETAIL -SELECT b.key, s.key, s.s_date FROM fullouter_long_big_1a b FULL OUTER JOIN fullouter_long_small_1a s ON b.key = s.key -PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN VECTORIZATION DETAIL -SELECT b.key, s.key, s.s_date FROM fullouter_long_big_1a b FULL OUTER JOIN fullouter_long_small_1a s ON b.key = s.key -POSTHOOK: type: QUERY -PLAN VECTORIZATION: - enabled: false - enabledConditionsNotMet: [hive.vectorized.execution.enabled IS false] - -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: b - Statistics: Num rows: 1 Data size: 205 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: key (type: bigint) - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 205 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: _col0 (type: bigint) - sort order: + - Map-reduce partition columns: _col0 (type: bigint) - Statistics: Num rows: 1 Data size: 205 Basic stats: COMPLETE Column stats: NONE - TableScan - alias: s - Statistics: Num rows: 1 Data size: 1640 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: key (type: bigint), s_date (type: date) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 1 Data size: 1640 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: _col0 (type: bigint) - sort order: + - Map-reduce partition columns: _col0 (type: bigint) - Statistics: Num rows: 1 Data size: 1640 Basic stats: COMPLETE Column stats: NONE - value expressions: _col1 (type: date) - Reduce Operator Tree: - Join Operator - condition map: - Full Outer Join 0 to 1 - keys: - 0 _col0 (type: bigint) - 1 _col0 (type: bigint) - 
outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 1 Data size: 225 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - Statistics: Num rows: 1 Data size: 225 Basic stats: COMPLETE Column stats: NONE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - ListSink - -PREHOOK: query: SELECT b.key, s.key, s.s_date FROM fullouter_long_big_1a b FULL OUTER JOIN fullouter_long_small_1a s ON b.key = s.key -PREHOOK: type: QUERY -PREHOOK: Input: default@fullouter_long_big_1a -PREHOOK: Input: default@fullouter_long_small_1a -#### A masked pattern was here #### -POSTHOOK: query: SELECT b.key, s.key, s.s_date FROM fullouter_long_big_1a b FULL OUTER JOIN fullouter_long_small_1a s ON b.key = s.key -POSTHOOK: type: QUERY -POSTHOOK: Input: default@fullouter_long_big_1a -POSTHOOK: Input: default@fullouter_long_small_1a -#### A masked pattern was here #### --5206670856103795573 NULL NULL --5310365297525168078 NULL NULL --6187919478609154811 NULL NULL --6187919478609154811 NULL NULL --6187919478609154811 NULL NULL --6187919478609154811 NULL NULL --8460550397108077433 NULL NULL -1569543799237464101 NULL NULL -3313583664488247651 NULL NULL -968819023021777205 NULL NULL -NULL -1339636982994067311 2000-06-20 -NULL -1339636982994067311 2008-12-03 -NULL -2098090254092150988 1817-03-12 -NULL -2098090254092150988 2163-05-26 -NULL -2098090254092150988 2219-12-23 -NULL -2184423060953067642 1853-07-06 -NULL -2184423060953067642 1880-10-06 -NULL -2575185053386712613 1809-07-12 -NULL -2575185053386712613 2105-01-21 -NULL -2688622006344936758 1948-10-15 -NULL -2688622006344936758 2129-01-11 -NULL -327698348664467755 2222-10-15 -NULL -3655445881497026796 2108-08-16 -NULL -4224290881682877258 1813-05-17 -NULL -4224290881682877258 2120-01-16 -NULL -4224290881682877258 2185-07-08 -NULL -4961171400048338491 2196-08-10 -NULL -5706981533666803767 1800-09-20 -NULL -5706981533666803767 2151-06-09 -NULL -5754527700632192146 1958-07-15 -NULL -614848861623872247 2101-05-25 -NULL -614848861623872247 2112-11-09 -NULL -6784441713807772877 1845-02-16 -NULL -6784441713807772877 2054-06-17 -NULL -7707546703881534780 2134-08-20 -NULL 214451696109242839 1855-05-12 -NULL 214451696109242839 1977-01-04 -NULL 214451696109242839 2179-04-18 -NULL 2438535236662373438 1881-09-16 -NULL 2438535236662373438 1916-01-10 -NULL 2438535236662373438 2026-06-23 -NULL 3845554233155411208 1805-11-10 -NULL 3845554233155411208 2264-04-05 -NULL 3873405809071478736 1918-11-20 -NULL 3873405809071478736 2034-06-09 -NULL 3873405809071478736 2164-04-23 -NULL 3905351789241845882 1866-07-28 -NULL 3905351789241845882 2045-12-05 -NULL 434940853096155515 2275-02-08 -NULL 4436884039838843341 2031-05-23 -NULL 5246983111579595707 1817-07-01 -NULL 5246983111579595707 2260-05-11 -NULL 5252407779338300447 2039-03-10 -NULL 5252407779338300447 2042-04-26 -NULL 6049335087268933751 2086-12-17 -NULL 6049335087268933751 2282-06-09 -NULL 7297177530102477725 1921-05-11 -NULL 7297177530102477725 1926-04-12 -NULL 7297177530102477725 2125-08-26 -NULL 7937120928560087303 2083-03-14 -NULL 8755921538765428593 1827-05-01 -NULL NULL 2024-01-23 -NULL NULL 2098-02-10 -NULL NULL 2242-02-08 diff --git ql/src/test/results/clientpositive/groupby_sort_1.q.out ql/src/test/results/clientpositive/groupby_sort_1.q.out deleted file mode 100644 
index 5784b3e418..0000000000 --- ql/src/test/results/clientpositive/groupby_sort_1.q.out +++ /dev/null @@ -1,6590 +0,0 @@ -PREHOOK: query: -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.20,0.20S) --- SORT_QUERY_RESULTS - -CREATE TABLE T1(key STRING, val STRING) -CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -POSTHOOK: query: -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.20,0.20S) --- SORT_QUERY_RESULTS - -CREATE TABLE T1(key STRING, val STRING) -CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@T1 -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1 -PREHOOK: type: LOAD -#### A masked pattern was here #### -PREHOOK: Output: default@t1 -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1 -POSTHOOK: type: LOAD -#### A masked pattern was here #### -POSTHOOK: Output: default@t1 -PREHOOK: query: -- perform an insert to make sure there are 2 files -INSERT OVERWRITE TABLE T1 select key, val from T1 -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Output: default@t1 -POSTHOOK: query: -- perform an insert to make sure there are 2 files -INSERT OVERWRITE TABLE T1 select key, val from T1 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Output: default@t1 -POSTHOOK: Lineage: t1.key SIMPLE [(t1)t1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: t1.val SIMPLE [(t1)t1.FieldSchema(name:val, type:string, comment:null), ] -PREHOOK: query: CREATE TABLE outputTbl1(key int, cnt int) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -POSTHOOK: query: CREATE TABLE outputTbl1(key int, cnt int) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@outputTbl1 -PREHOOK: query: -- The plan should be converted to a map-side group by if the group by key --- matches the sorted key -EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl1 -SELECT key, count(1) FROM T1 GROUP BY key -PREHOOK: type: QUERY -POSTHOOK: query: -- The plan should be converted to a map-side group by if the group by key --- matches the sorted key -EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl1 -SELECT key, count(1) FROM T1 GROUP BY key -POSTHOOK: type: QUERY -ABSTRACT SYNTAX TREE: - -TOK_QUERY - TOK_FROM - TOK_TABREF - TOK_TABNAME - T1 - TOK_INSERT - TOK_DESTINATION - TOK_TAB - TOK_TABNAME - outputTbl1 - TOK_SELECT - TOK_SELEXPR - TOK_TABLE_OR_COL - key - TOK_SELEXPR - TOK_FUNCTION - count - 1 - TOK_GROUPBY - TOK_TABLE_OR_COL - key - - -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5 - Stage-4 - Stage-0 depends on stages: Stage-4, Stage-3, Stage-6 - Stage-2 depends on stages: Stage-0 - Stage-3 - Stage-5 - Stage-6 depends on stages: Stage-5 - -STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: t1 - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - GatherStats: false - Select Operator - expressions: key (type: string) - outputColumnNames: key - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - Group By Operator - aggregations: count(1) - keys: key (type: string) - mode: final - outputColumnNames: _col0, _col1 - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: UDFToInteger(_col0) (type: int), 
UDFToInteger(_col1) (type: int) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - GlobalTableId: 1 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE -#### A masked pattern was here #### - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - bucket_count -1 - columns key,cnt - columns.comments - columns.types int:int -#### A masked pattern was here #### - name default.outputtbl1 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 - TotalFiles: 1 - GatherStats: true - MultiFileSpray: false - Path -> Alias: -#### A masked pattern was here #### - Path -> Partition: -#### A masked pattern was here #### - Partition - base file name: t1 - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - SORTBUCKETCOLSPREFIX TRUE - bucket_count 2 - bucket_field_name key - columns key,val - columns.comments - columns.types string:string -#### A masked pattern was here #### - name default.t1 - numFiles 1 - numRows 6 - rawDataSize 24 - serialization.ddl struct t1 { string key, string val} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 30 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - SORTBUCKETCOLSPREFIX TRUE - bucket_count 2 - bucket_field_name key - columns key,val - columns.comments - columns.types string:string -#### A masked pattern was here #### - name default.t1 - numFiles 1 - numRows 6 - rawDataSize 24 - serialization.ddl struct t1 { string key, string val} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 30 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.t1 - name: default.t1 - Truncated Path -> Alias: - /t1 [t1] - - Stage: Stage-7 - Conditional Operator - - Stage: Stage-4 - Move Operator - files: - hdfs directory: true -#### A masked pattern was here #### - - Stage: Stage-0 - Move Operator - tables: - replace: true -#### A masked pattern was here #### - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - bucket_count -1 - columns key,cnt - columns.comments - columns.types int:int -#### A masked pattern was here #### - name default.outputtbl1 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 - - Stage: Stage-2 - Stats-Aggr Operator -#### A masked pattern was here #### - - Stage: Stage-3 - Map Reduce - Map Operator Tree: - 
TableScan - GatherStats: false - File Output Operator - compressed: false - GlobalTableId: 0 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - bucket_count -1 - columns key,cnt - columns.comments - columns.types int:int -#### A masked pattern was here #### - name default.outputtbl1 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 - TotalFiles: 1 - GatherStats: false - MultiFileSpray: false - Path -> Alias: -#### A masked pattern was here #### - Path -> Partition: -#### A masked pattern was here #### - Partition - base file name: -ext-10001 - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - bucket_count -1 - columns key,cnt - columns.comments - columns.types int:int -#### A masked pattern was here #### - name default.outputtbl1 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - bucket_count -1 - columns key,cnt - columns.comments - columns.types int:int -#### A masked pattern was here #### - name default.outputtbl1 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 - name: default.outputtbl1 - Truncated Path -> Alias: -#### A masked pattern was here #### - - Stage: Stage-5 - Map Reduce - Map Operator Tree: - TableScan - GatherStats: false - File Output Operator - compressed: false - GlobalTableId: 0 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - bucket_count -1 - columns key,cnt - columns.comments - columns.types int:int -#### A masked pattern was here #### - name default.outputtbl1 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 - TotalFiles: 1 - GatherStats: false - MultiFileSpray: false - Path -> Alias: -#### A masked pattern was here #### - Path -> Partition: -#### A masked pattern was here #### - Partition - base file name: -ext-10001 - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - bucket_count -1 - columns key,cnt - columns.comments - columns.types int:int -#### A masked pattern was here #### - name default.outputtbl1 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} - serialization.format 1 - serialization.lib 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - bucket_count -1 - columns key,cnt - columns.comments - columns.types int:int -#### A masked pattern was here #### - name default.outputtbl1 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 - name: default.outputtbl1 - Truncated Path -> Alias: -#### A masked pattern was here #### - - Stage: Stage-6 - Move Operator - files: - hdfs directory: true -#### A masked pattern was here #### - -PREHOOK: query: INSERT OVERWRITE TABLE outputTbl1 -SELECT key, count(1) FROM T1 GROUP BY key -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Output: default@outputtbl1 -POSTHOOK: query: INSERT OVERWRITE TABLE outputTbl1 -SELECT key, count(1) FROM T1 GROUP BY key -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Output: default@outputtbl1 -POSTHOOK: Lineage: outputtbl1.cnt EXPRESSION [(t1)t1.null, ] -POSTHOOK: Lineage: outputtbl1.key EXPRESSION [(t1)t1.FieldSchema(name:key, type:string, comment:null), ] -PREHOOK: query: SELECT * FROM outputTbl1 -PREHOOK: type: QUERY -PREHOOK: Input: default@outputtbl1 -#### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM outputTbl1 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@outputtbl1 -#### A masked pattern was here #### -1 1 -2 1 -3 1 -7 1 -8 2 -PREHOOK: query: CREATE TABLE outputTbl2(key1 int, key2 string, cnt int) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -POSTHOOK: query: CREATE TABLE outputTbl2(key1 int, key2 string, cnt int) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@outputTbl2 -PREHOOK: query: -- no map-side group by even if the group by key is a superset of sorted key -EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl2 -SELECT key, val, count(1) FROM T1 GROUP BY key, val -PREHOOK: type: QUERY -POSTHOOK: query: -- no map-side group by even if the group by key is a superset of sorted key -EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl2 -SELECT key, val, count(1) FROM T1 GROUP BY key, val -POSTHOOK: type: QUERY -ABSTRACT SYNTAX TREE: - -TOK_QUERY - TOK_FROM - TOK_TABREF - TOK_TABNAME - T1 - TOK_INSERT - TOK_DESTINATION - TOK_TAB - TOK_TABNAME - outputTbl2 - TOK_SELECT - TOK_SELEXPR - TOK_TABLE_OR_COL - key - TOK_SELEXPR - TOK_TABLE_OR_COL - val - TOK_SELEXPR - TOK_FUNCTION - count - 1 - TOK_GROUPBY - TOK_TABLE_OR_COL - key - TOK_TABLE_OR_COL - val - - -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - Stage-2 depends on stages: Stage-0 - -STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: t1 - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - GatherStats: false - Select Operator - expressions: key (type: string), val (type: string) - outputColumnNames: key, val - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - Group By Operator - aggregations: count(1) - keys: key (type: string), val (type: string) - mode: hash - outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 6 Data size: 24 Basic stats: 
COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: _col0 (type: string), _col1 (type: string) - sort order: ++ - Map-reduce partition columns: _col0 (type: string), _col1 (type: string) - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - tag: -1 - value expressions: _col2 (type: bigint) - auto parallelism: false - Path -> Alias: -#### A masked pattern was here #### - Path -> Partition: -#### A masked pattern was here #### - Partition - base file name: t1 - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - SORTBUCKETCOLSPREFIX TRUE - bucket_count 2 - bucket_field_name key - columns key,val - columns.comments - columns.types string:string -#### A masked pattern was here #### - name default.t1 - numFiles 1 - numRows 6 - rawDataSize 24 - serialization.ddl struct t1 { string key, string val} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 30 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - SORTBUCKETCOLSPREFIX TRUE - bucket_count 2 - bucket_field_name key - columns key,val - columns.comments - columns.types string:string -#### A masked pattern was here #### - name default.t1 - numFiles 1 - numRows 6 - rawDataSize 24 - serialization.ddl struct t1 { string key, string val} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 30 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.t1 - name: default.t1 - Truncated Path -> Alias: - /t1 [t1] - Needs Tagging: false - Reduce Operator Tree: - Group By Operator - aggregations: count(VALUE._col0) - keys: KEY._col0 (type: string), KEY._col1 (type: string) - mode: mergepartial - outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: UDFToInteger(_col0) (type: int), _col1 (type: string), UDFToInteger(_col2) (type: int) - outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - GlobalTableId: 1 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE -#### A masked pattern was here #### - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - bucket_count -1 - columns key1,key2,cnt - columns.comments - columns.types int:string:int -#### A masked pattern was here #### - name default.outputtbl2 - serialization.ddl struct outputtbl2 { i32 key1, string key2, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl2 - TotalFiles: 1 - GatherStats: true - MultiFileSpray: false - - Stage: Stage-0 - Move Operator - tables: - replace: true -#### A masked pattern was here #### - table: - input format: org.apache.hadoop.mapred.TextInputFormat - 
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - bucket_count -1 - columns key1,key2,cnt - columns.comments - columns.types int:string:int -#### A masked pattern was here #### - name default.outputtbl2 - serialization.ddl struct outputtbl2 { i32 key1, string key2, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl2 - - Stage: Stage-2 - Stats-Aggr Operator -#### A masked pattern was here #### - -PREHOOK: query: INSERT OVERWRITE TABLE outputTbl2 -SELECT key, val, count(1) FROM T1 GROUP BY key, val -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Output: default@outputtbl2 -POSTHOOK: query: INSERT OVERWRITE TABLE outputTbl2 -SELECT key, val, count(1) FROM T1 GROUP BY key, val -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Output: default@outputtbl2 -POSTHOOK: Lineage: outputtbl2.cnt EXPRESSION [(t1)t1.null, ] -POSTHOOK: Lineage: outputtbl2.key1 EXPRESSION [(t1)t1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: outputtbl2.key2 SIMPLE [(t1)t1.FieldSchema(name:val, type:string, comment:null), ] -PREHOOK: query: SELECT * FROM outputTbl2 -PREHOOK: type: QUERY -PREHOOK: Input: default@outputtbl2 -#### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM outputTbl2 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@outputtbl2 -#### A masked pattern was here #### -1 11 1 -2 12 1 -3 13 1 -7 17 1 -8 18 1 -8 28 1 -PREHOOK: query: -- It should work for sub-queries -EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl1 -SELECT key, count(1) FROM (SELECT key, val FROM T1) subq1 GROUP BY key -PREHOOK: type: QUERY -POSTHOOK: query: -- It should work for sub-queries -EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl1 -SELECT key, count(1) FROM (SELECT key, val FROM T1) subq1 GROUP BY key -POSTHOOK: type: QUERY -ABSTRACT SYNTAX TREE: - -TOK_QUERY - TOK_FROM - TOK_SUBQUERY - TOK_QUERY - TOK_FROM - TOK_TABREF - TOK_TABNAME - T1 - TOK_INSERT - TOK_DESTINATION - TOK_DIR - TOK_TMP_FILE - TOK_SELECT - TOK_SELEXPR - TOK_TABLE_OR_COL - key - TOK_SELEXPR - TOK_TABLE_OR_COL - val - subq1 - TOK_INSERT - TOK_DESTINATION - TOK_TAB - TOK_TABNAME - outputTbl1 - TOK_SELECT - TOK_SELEXPR - TOK_TABLE_OR_COL - key - TOK_SELEXPR - TOK_FUNCTION - count - 1 - TOK_GROUPBY - TOK_TABLE_OR_COL - key - - -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5 - Stage-4 - Stage-0 depends on stages: Stage-4, Stage-3, Stage-6 - Stage-2 depends on stages: Stage-0 - Stage-3 - Stage-5 - Stage-6 depends on stages: Stage-5 - -STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: t1 - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - GatherStats: false - Select Operator - expressions: key (type: string) - outputColumnNames: _col0 - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - Group By Operator - aggregations: count(1) - keys: _col0 (type: string) - mode: final - outputColumnNames: _col0, _col1 - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: UDFToInteger(_col0) (type: int), UDFToInteger(_col1) (type: int) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE - File Output Operator - 
compressed: false - GlobalTableId: 1 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE -#### A masked pattern was here #### - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key,cnt - columns.comments - columns.types int:int -#### A masked pattern was here #### - name default.outputtbl1 - numFiles 1 - numRows 5 - rawDataSize 15 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 20 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 - TotalFiles: 1 - GatherStats: true - MultiFileSpray: false - Path -> Alias: -#### A masked pattern was here #### - Path -> Partition: -#### A masked pattern was here #### - Partition - base file name: t1 - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - SORTBUCKETCOLSPREFIX TRUE - bucket_count 2 - bucket_field_name key - columns key,val - columns.comments - columns.types string:string -#### A masked pattern was here #### - name default.t1 - numFiles 1 - numRows 6 - rawDataSize 24 - serialization.ddl struct t1 { string key, string val} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 30 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - SORTBUCKETCOLSPREFIX TRUE - bucket_count 2 - bucket_field_name key - columns key,val - columns.comments - columns.types string:string -#### A masked pattern was here #### - name default.t1 - numFiles 1 - numRows 6 - rawDataSize 24 - serialization.ddl struct t1 { string key, string val} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 30 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.t1 - name: default.t1 - Truncated Path -> Alias: - /t1 [subq1:t1] - - Stage: Stage-7 - Conditional Operator - - Stage: Stage-4 - Move Operator - files: - hdfs directory: true -#### A masked pattern was here #### - - Stage: Stage-0 - Move Operator - tables: - replace: true -#### A masked pattern was here #### - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key,cnt - columns.comments - columns.types int:int -#### A masked pattern was here #### - name default.outputtbl1 - numFiles 1 - numRows 5 - rawDataSize 15 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 20 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 - - Stage: Stage-2 - Stats-Aggr Operator -#### A masked pattern was here #### - - Stage: Stage-3 - Map Reduce - Map Operator Tree: - 
TableScan - GatherStats: false - File Output Operator - compressed: false - GlobalTableId: 0 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key,cnt - columns.comments - columns.types int:int -#### A masked pattern was here #### - name default.outputtbl1 - numFiles 1 - numRows 5 - rawDataSize 15 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 20 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 - TotalFiles: 1 - GatherStats: false - MultiFileSpray: false - Path -> Alias: -#### A masked pattern was here #### - Path -> Partition: -#### A masked pattern was here #### - Partition - base file name: -ext-10001 - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key,cnt - columns.comments - columns.types int:int -#### A masked pattern was here #### - name default.outputtbl1 - numFiles 1 - numRows 5 - rawDataSize 15 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 20 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key,cnt - columns.comments - columns.types int:int -#### A masked pattern was here #### - name default.outputtbl1 - numFiles 1 - numRows 5 - rawDataSize 15 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 20 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 - name: default.outputtbl1 - Truncated Path -> Alias: -#### A masked pattern was here #### - - Stage: Stage-5 - Map Reduce - Map Operator Tree: - TableScan - GatherStats: false - File Output Operator - compressed: false - GlobalTableId: 0 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key,cnt - columns.comments - columns.types int:int -#### A masked pattern was here #### - name default.outputtbl1 - numFiles 1 - numRows 5 - rawDataSize 15 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 20 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 - TotalFiles: 1 - GatherStats: false - MultiFileSpray: false - Path -> Alias: -#### A masked pattern was here #### - Path -> Partition: -#### A masked pattern was here #### - Partition - base file name: -ext-10001 - input format: org.apache.hadoop.mapred.TextInputFormat - output 
format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key,cnt - columns.comments - columns.types int:int -#### A masked pattern was here #### - name default.outputtbl1 - numFiles 1 - numRows 5 - rawDataSize 15 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 20 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key,cnt - columns.comments - columns.types int:int -#### A masked pattern was here #### - name default.outputtbl1 - numFiles 1 - numRows 5 - rawDataSize 15 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 20 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 - name: default.outputtbl1 - Truncated Path -> Alias: -#### A masked pattern was here #### - - Stage: Stage-6 - Move Operator - files: - hdfs directory: true -#### A masked pattern was here #### - -PREHOOK: query: INSERT OVERWRITE TABLE outputTbl1 -SELECT key, count(1) FROM (SELECT key, val FROM T1) subq1 GROUP BY key -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Output: default@outputtbl1 -POSTHOOK: query: INSERT OVERWRITE TABLE outputTbl1 -SELECT key, count(1) FROM (SELECT key, val FROM T1) subq1 GROUP BY key -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Output: default@outputtbl1 -POSTHOOK: Lineage: outputtbl1.cnt EXPRESSION [(t1)t1.null, ] -POSTHOOK: Lineage: outputtbl1.key EXPRESSION [(t1)t1.FieldSchema(name:key, type:string, comment:null), ] -PREHOOK: query: SELECT * FROM outputTbl1 -PREHOOK: type: QUERY -PREHOOK: Input: default@outputtbl1 -#### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM outputTbl1 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@outputtbl1 -#### A masked pattern was here #### -1 1 -2 1 -3 1 -7 1 -8 2 -PREHOOK: query: -- It should work for sub-queries with column aliases -EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl1 -SELECT k, count(1) FROM (SELECT key as k, val as v FROM T1) subq1 GROUP BY k -PREHOOK: type: QUERY -POSTHOOK: query: -- It should work for sub-queries with column aliases -EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl1 -SELECT k, count(1) FROM (SELECT key as k, val as v FROM T1) subq1 GROUP BY k -POSTHOOK: type: QUERY -ABSTRACT SYNTAX TREE: - -TOK_QUERY - TOK_FROM - TOK_SUBQUERY - TOK_QUERY - TOK_FROM - TOK_TABREF - TOK_TABNAME - T1 - TOK_INSERT - TOK_DESTINATION - TOK_DIR - TOK_TMP_FILE - TOK_SELECT - TOK_SELEXPR - TOK_TABLE_OR_COL - key - k - TOK_SELEXPR - TOK_TABLE_OR_COL - val - v - subq1 - TOK_INSERT - TOK_DESTINATION - TOK_TAB - TOK_TABNAME - outputTbl1 - TOK_SELECT - TOK_SELEXPR - TOK_TABLE_OR_COL - k - TOK_SELEXPR - TOK_FUNCTION - count - 1 - TOK_GROUPBY - TOK_TABLE_OR_COL - k - - -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5 - Stage-4 - Stage-0 depends on stages: Stage-4, Stage-3, Stage-6 - Stage-2 depends on stages: Stage-0 - Stage-3 - Stage-5 - Stage-6 depends on stages: Stage-5 - -STAGE PLANS: - Stage: Stage-1 - 
Map Reduce - Map Operator Tree: - TableScan - alias: t1 - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - GatherStats: false - Select Operator - expressions: key (type: string) - outputColumnNames: _col0 - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - Group By Operator - aggregations: count(1) - keys: _col0 (type: string) - mode: final - outputColumnNames: _col0, _col1 - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: UDFToInteger(_col0) (type: int), UDFToInteger(_col1) (type: int) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - GlobalTableId: 1 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE -#### A masked pattern was here #### - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key,cnt - columns.comments - columns.types int:int -#### A masked pattern was here #### - name default.outputtbl1 - numFiles 1 - numRows 5 - rawDataSize 15 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 20 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 - TotalFiles: 1 - GatherStats: true - MultiFileSpray: false - Path -> Alias: -#### A masked pattern was here #### - Path -> Partition: -#### A masked pattern was here #### - Partition - base file name: t1 - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - SORTBUCKETCOLSPREFIX TRUE - bucket_count 2 - bucket_field_name key - columns key,val - columns.comments - columns.types string:string -#### A masked pattern was here #### - name default.t1 - numFiles 1 - numRows 6 - rawDataSize 24 - serialization.ddl struct t1 { string key, string val} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 30 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - SORTBUCKETCOLSPREFIX TRUE - bucket_count 2 - bucket_field_name key - columns key,val - columns.comments - columns.types string:string -#### A masked pattern was here #### - name default.t1 - numFiles 1 - numRows 6 - rawDataSize 24 - serialization.ddl struct t1 { string key, string val} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 30 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.t1 - name: default.t1 - Truncated Path -> Alias: - /t1 [subq1:t1] - - Stage: Stage-7 - Conditional Operator - - Stage: Stage-4 - Move Operator - files: - hdfs directory: true -#### A masked pattern was here #### - - Stage: Stage-0 - Move Operator - tables: - replace: true -#### A masked pattern was here #### - table: - input format: 
org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key,cnt - columns.comments - columns.types int:int -#### A masked pattern was here #### - name default.outputtbl1 - numFiles 1 - numRows 5 - rawDataSize 15 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 20 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 - - Stage: Stage-2 - Stats-Aggr Operator -#### A masked pattern was here #### - - Stage: Stage-3 - Map Reduce - Map Operator Tree: - TableScan - GatherStats: false - File Output Operator - compressed: false - GlobalTableId: 0 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key,cnt - columns.comments - columns.types int:int -#### A masked pattern was here #### - name default.outputtbl1 - numFiles 1 - numRows 5 - rawDataSize 15 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 20 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 - TotalFiles: 1 - GatherStats: false - MultiFileSpray: false - Path -> Alias: -#### A masked pattern was here #### - Path -> Partition: -#### A masked pattern was here #### - Partition - base file name: -ext-10001 - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key,cnt - columns.comments - columns.types int:int -#### A masked pattern was here #### - name default.outputtbl1 - numFiles 1 - numRows 5 - rawDataSize 15 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 20 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key,cnt - columns.comments - columns.types int:int -#### A masked pattern was here #### - name default.outputtbl1 - numFiles 1 - numRows 5 - rawDataSize 15 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 20 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 - name: default.outputtbl1 - Truncated Path -> Alias: -#### A masked pattern was here #### - - Stage: Stage-5 - Map Reduce - Map Operator Tree: - TableScan - GatherStats: false - File Output Operator - compressed: false - GlobalTableId: 0 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - 
COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key,cnt - columns.comments - columns.types int:int -#### A masked pattern was here #### - name default.outputtbl1 - numFiles 1 - numRows 5 - rawDataSize 15 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 20 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 - TotalFiles: 1 - GatherStats: false - MultiFileSpray: false - Path -> Alias: -#### A masked pattern was here #### - Path -> Partition: -#### A masked pattern was here #### - Partition - base file name: -ext-10001 - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key,cnt - columns.comments - columns.types int:int -#### A masked pattern was here #### - name default.outputtbl1 - numFiles 1 - numRows 5 - rawDataSize 15 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 20 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key,cnt - columns.comments - columns.types int:int -#### A masked pattern was here #### - name default.outputtbl1 - numFiles 1 - numRows 5 - rawDataSize 15 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 20 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 - name: default.outputtbl1 - Truncated Path -> Alias: -#### A masked pattern was here #### - - Stage: Stage-6 - Move Operator - files: - hdfs directory: true -#### A masked pattern was here #### - -PREHOOK: query: INSERT OVERWRITE TABLE outputTbl1 -SELECT k, count(1) FROM (SELECT key as k, val as v FROM T1) subq1 GROUP BY k -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Output: default@outputtbl1 -POSTHOOK: query: INSERT OVERWRITE TABLE outputTbl1 -SELECT k, count(1) FROM (SELECT key as k, val as v FROM T1) subq1 GROUP BY k -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Output: default@outputtbl1 -POSTHOOK: Lineage: outputtbl1.cnt EXPRESSION [(t1)t1.null, ] -POSTHOOK: Lineage: outputtbl1.key EXPRESSION [(t1)t1.FieldSchema(name:key, type:string, comment:null), ] -PREHOOK: query: SELECT * FROM outputTbl1 -PREHOOK: type: QUERY -PREHOOK: Input: default@outputtbl1 -#### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM outputTbl1 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@outputtbl1 -#### A masked pattern was here #### -1 1 -2 1 -3 1 -7 1 -8 2 -PREHOOK: query: CREATE TABLE outputTbl3(key1 int, key2 int, cnt int) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -POSTHOOK: query: CREATE TABLE outputTbl3(key1 int, key2 int, cnt int) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@outputTbl3 -PREHOOK: query: -- The plan should be converted to a map-side group by if the group by key contains a constant followed --- by a match to the 
sorted key -EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl3 -SELECT 1, key, count(1) FROM T1 GROUP BY 1, key -PREHOOK: type: QUERY -POSTHOOK: query: -- The plan should be converted to a map-side group by if the group by key contains a constant followed --- by a match to the sorted key -EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl3 -SELECT 1, key, count(1) FROM T1 GROUP BY 1, key -POSTHOOK: type: QUERY -ABSTRACT SYNTAX TREE: - -TOK_QUERY - TOK_FROM - TOK_TABREF - TOK_TABNAME - T1 - TOK_INSERT - TOK_DESTINATION - TOK_TAB - TOK_TABNAME - outputTbl3 - TOK_SELECT - TOK_SELEXPR - 1 - TOK_SELEXPR - TOK_TABLE_OR_COL - key - TOK_SELEXPR - TOK_FUNCTION - count - 1 - TOK_GROUPBY - 1 - TOK_TABLE_OR_COL - key - - -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5 - Stage-4 - Stage-0 depends on stages: Stage-4, Stage-3, Stage-6 - Stage-2 depends on stages: Stage-0 - Stage-3 - Stage-5 - Stage-6 depends on stages: Stage-5 - -STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: t1 - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - GatherStats: false - Select Operator - expressions: key (type: string) - outputColumnNames: key - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - Group By Operator - aggregations: count(1) - keys: 1 (type: int), key (type: string) - mode: final - outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: _col0 (type: int), UDFToInteger(_col1) (type: int), UDFToInteger(_col2) (type: int) - outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - GlobalTableId: 1 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE -#### A masked pattern was here #### - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - bucket_count -1 - columns key1,key2,cnt - columns.comments - columns.types int:int:int -#### A masked pattern was here #### - name default.outputtbl3 - serialization.ddl struct outputtbl3 { i32 key1, i32 key2, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl3 - TotalFiles: 1 - GatherStats: true - MultiFileSpray: false - Path -> Alias: -#### A masked pattern was here #### - Path -> Partition: -#### A masked pattern was here #### - Partition - base file name: t1 - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - SORTBUCKETCOLSPREFIX TRUE - bucket_count 2 - bucket_field_name key - columns key,val - columns.comments - columns.types string:string -#### A masked pattern was here #### - name default.t1 - numFiles 1 - numRows 6 - rawDataSize 24 - serialization.ddl struct t1 { string key, string val} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 30 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - input format: 
org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - SORTBUCKETCOLSPREFIX TRUE - bucket_count 2 - bucket_field_name key - columns key,val - columns.comments - columns.types string:string -#### A masked pattern was here #### - name default.t1 - numFiles 1 - numRows 6 - rawDataSize 24 - serialization.ddl struct t1 { string key, string val} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 30 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.t1 - name: default.t1 - Truncated Path -> Alias: - /t1 [t1] - - Stage: Stage-7 - Conditional Operator - - Stage: Stage-4 - Move Operator - files: - hdfs directory: true -#### A masked pattern was here #### - - Stage: Stage-0 - Move Operator - tables: - replace: true -#### A masked pattern was here #### - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - bucket_count -1 - columns key1,key2,cnt - columns.comments - columns.types int:int:int -#### A masked pattern was here #### - name default.outputtbl3 - serialization.ddl struct outputtbl3 { i32 key1, i32 key2, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl3 - - Stage: Stage-2 - Stats-Aggr Operator -#### A masked pattern was here #### - - Stage: Stage-3 - Map Reduce - Map Operator Tree: - TableScan - GatherStats: false - File Output Operator - compressed: false - GlobalTableId: 0 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - bucket_count -1 - columns key1,key2,cnt - columns.comments - columns.types int:int:int -#### A masked pattern was here #### - name default.outputtbl3 - serialization.ddl struct outputtbl3 { i32 key1, i32 key2, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl3 - TotalFiles: 1 - GatherStats: false - MultiFileSpray: false - Path -> Alias: -#### A masked pattern was here #### - Path -> Partition: -#### A masked pattern was here #### - Partition - base file name: -ext-10001 - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - bucket_count -1 - columns key1,key2,cnt - columns.comments - columns.types int:int:int -#### A masked pattern was here #### - name default.outputtbl3 - serialization.ddl struct outputtbl3 { i32 key1, i32 key2, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - bucket_count -1 - columns key1,key2,cnt - columns.comments - columns.types int:int:int -#### A masked pattern was here #### - name default.outputtbl3 - serialization.ddl 
struct outputtbl3 { i32 key1, i32 key2, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl3 - name: default.outputtbl3 - Truncated Path -> Alias: -#### A masked pattern was here #### - - Stage: Stage-5 - Map Reduce - Map Operator Tree: - TableScan - GatherStats: false - File Output Operator - compressed: false - GlobalTableId: 0 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - bucket_count -1 - columns key1,key2,cnt - columns.comments - columns.types int:int:int -#### A masked pattern was here #### - name default.outputtbl3 - serialization.ddl struct outputtbl3 { i32 key1, i32 key2, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl3 - TotalFiles: 1 - GatherStats: false - MultiFileSpray: false - Path -> Alias: -#### A masked pattern was here #### - Path -> Partition: -#### A masked pattern was here #### - Partition - base file name: -ext-10001 - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - bucket_count -1 - columns key1,key2,cnt - columns.comments - columns.types int:int:int -#### A masked pattern was here #### - name default.outputtbl3 - serialization.ddl struct outputtbl3 { i32 key1, i32 key2, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - bucket_count -1 - columns key1,key2,cnt - columns.comments - columns.types int:int:int -#### A masked pattern was here #### - name default.outputtbl3 - serialization.ddl struct outputtbl3 { i32 key1, i32 key2, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl3 - name: default.outputtbl3 - Truncated Path -> Alias: -#### A masked pattern was here #### - - Stage: Stage-6 - Move Operator - files: - hdfs directory: true -#### A masked pattern was here #### - -PREHOOK: query: INSERT OVERWRITE TABLE outputTbl3 -SELECT 1, key, count(1) FROM T1 GROUP BY 1, key -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Output: default@outputtbl3 -POSTHOOK: query: INSERT OVERWRITE TABLE outputTbl3 -SELECT 1, key, count(1) FROM T1 GROUP BY 1, key -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Output: default@outputtbl3 -POSTHOOK: Lineage: outputtbl3.cnt EXPRESSION [(t1)t1.null, ] -POSTHOOK: Lineage: outputtbl3.key1 SIMPLE [] -POSTHOOK: Lineage: outputtbl3.key2 EXPRESSION [(t1)t1.FieldSchema(name:key, type:string, comment:null), ] -PREHOOK: query: SELECT * FROM outputTbl3 -PREHOOK: type: QUERY -PREHOOK: Input: default@outputtbl3 -#### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM outputTbl3 -POSTHOOK: type: QUERY -POSTHOOK: Input: 
default@outputtbl3 -#### A masked pattern was here #### -1 1 1 -1 2 1 -1 3 1 -1 7 1 -1 8 2 -PREHOOK: query: CREATE TABLE outputTbl4(key1 int, key2 int, key3 string, cnt int) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -POSTHOOK: query: CREATE TABLE outputTbl4(key1 int, key2 int, key3 string, cnt int) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@outputTbl4 -PREHOOK: query: -- no map-side group by if the group by key contains a constant followed by another column -EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl4 -SELECT key, 1, val, count(1) FROM T1 GROUP BY key, 1, val -PREHOOK: type: QUERY -POSTHOOK: query: -- no map-side group by if the group by key contains a constant followed by another column -EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl4 -SELECT key, 1, val, count(1) FROM T1 GROUP BY key, 1, val -POSTHOOK: type: QUERY -ABSTRACT SYNTAX TREE: - -TOK_QUERY - TOK_FROM - TOK_TABREF - TOK_TABNAME - T1 - TOK_INSERT - TOK_DESTINATION - TOK_TAB - TOK_TABNAME - outputTbl4 - TOK_SELECT - TOK_SELEXPR - TOK_TABLE_OR_COL - key - TOK_SELEXPR - 1 - TOK_SELEXPR - TOK_TABLE_OR_COL - val - TOK_SELEXPR - TOK_FUNCTION - count - 1 - TOK_GROUPBY - TOK_TABLE_OR_COL - key - 1 - TOK_TABLE_OR_COL - val - - -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - Stage-2 depends on stages: Stage-0 - -STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: t1 - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - GatherStats: false - Select Operator - expressions: key (type: string), val (type: string) - outputColumnNames: key, val - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - Group By Operator - aggregations: count(1) - keys: key (type: string), 1 (type: int), val (type: string) - mode: hash - outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: _col0 (type: string), _col1 (type: int), _col2 (type: string) - sort order: +++ - Map-reduce partition columns: _col0 (type: string), _col1 (type: int), _col2 (type: string) - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - tag: -1 - value expressions: _col3 (type: bigint) - auto parallelism: false - Path -> Alias: -#### A masked pattern was here #### - Path -> Partition: -#### A masked pattern was here #### - Partition - base file name: t1 - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - SORTBUCKETCOLSPREFIX TRUE - bucket_count 2 - bucket_field_name key - columns key,val - columns.comments - columns.types string:string -#### A masked pattern was here #### - name default.t1 - numFiles 1 - numRows 6 - rawDataSize 24 - serialization.ddl struct t1 { string key, string val} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 30 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - SORTBUCKETCOLSPREFIX TRUE - bucket_count 2 - bucket_field_name key - columns key,val - columns.comments - columns.types string:string -#### A masked 
pattern was here #### - name default.t1 - numFiles 1 - numRows 6 - rawDataSize 24 - serialization.ddl struct t1 { string key, string val} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 30 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.t1 - name: default.t1 - Truncated Path -> Alias: - /t1 [t1] - Needs Tagging: false - Reduce Operator Tree: - Group By Operator - aggregations: count(VALUE._col0) - keys: KEY._col0 (type: string), KEY._col1 (type: int), KEY._col2 (type: string) - mode: mergepartial - outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: UDFToInteger(_col0) (type: int), _col1 (type: int), _col2 (type: string), UDFToInteger(_col3) (type: int) - outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - GlobalTableId: 1 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE -#### A masked pattern was here #### - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - bucket_count -1 - columns key1,key2,key3,cnt - columns.comments - columns.types int:int:string:int -#### A masked pattern was here #### - name default.outputtbl4 - serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl4 - TotalFiles: 1 - GatherStats: true - MultiFileSpray: false - - Stage: Stage-0 - Move Operator - tables: - replace: true -#### A masked pattern was here #### - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - bucket_count -1 - columns key1,key2,key3,cnt - columns.comments - columns.types int:int:string:int -#### A masked pattern was here #### - name default.outputtbl4 - serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl4 - - Stage: Stage-2 - Stats-Aggr Operator -#### A masked pattern was here #### - -PREHOOK: query: INSERT OVERWRITE TABLE outputTbl4 -SELECT key, 1, val, count(1) FROM T1 GROUP BY key, 1, val -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Output: default@outputtbl4 -POSTHOOK: query: INSERT OVERWRITE TABLE outputTbl4 -SELECT key, 1, val, count(1) FROM T1 GROUP BY key, 1, val -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Output: default@outputtbl4 -POSTHOOK: Lineage: outputtbl4.cnt EXPRESSION [(t1)t1.null, ] -POSTHOOK: Lineage: outputtbl4.key1 EXPRESSION [(t1)t1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: outputtbl4.key2 SIMPLE [] -POSTHOOK: Lineage: outputtbl4.key3 SIMPLE [(t1)t1.FieldSchema(name:val, type:string, comment:null), ] -PREHOOK: query: SELECT * FROM outputTbl4 -PREHOOK: type: QUERY -PREHOOK: Input: 
default@outputtbl4 -#### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM outputTbl4 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@outputtbl4 -#### A masked pattern was here #### -1 1 11 1 -2 1 12 1 -3 1 13 1 -7 1 17 1 -8 1 18 1 -8 1 28 1 -PREHOOK: query: -- no map-side group by if the group by key contains a function -EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl3 -SELECT key, key + 1, count(1) FROM T1 GROUP BY key, key + 1 -PREHOOK: type: QUERY -POSTHOOK: query: -- no map-side group by if the group by key contains a function -EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl3 -SELECT key, key + 1, count(1) FROM T1 GROUP BY key, key + 1 -POSTHOOK: type: QUERY -ABSTRACT SYNTAX TREE: - -TOK_QUERY - TOK_FROM - TOK_TABREF - TOK_TABNAME - T1 - TOK_INSERT - TOK_DESTINATION - TOK_TAB - TOK_TABNAME - outputTbl3 - TOK_SELECT - TOK_SELEXPR - TOK_TABLE_OR_COL - key - TOK_SELEXPR - + - TOK_TABLE_OR_COL - key - 1 - TOK_SELEXPR - TOK_FUNCTION - count - 1 - TOK_GROUPBY - TOK_TABLE_OR_COL - key - + - TOK_TABLE_OR_COL - key - 1 - - -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - Stage-2 depends on stages: Stage-0 - -STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: t1 - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - GatherStats: false - Select Operator - expressions: key (type: string) - outputColumnNames: key - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - Group By Operator - aggregations: count(1) - keys: key (type: string), (key + 1) (type: double) - mode: hash - outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: _col0 (type: string), _col1 (type: double) - sort order: ++ - Map-reduce partition columns: _col0 (type: string), _col1 (type: double) - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - tag: -1 - value expressions: _col2 (type: bigint) - auto parallelism: false - Path -> Alias: -#### A masked pattern was here #### - Path -> Partition: -#### A masked pattern was here #### - Partition - base file name: t1 - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - SORTBUCKETCOLSPREFIX TRUE - bucket_count 2 - bucket_field_name key - columns key,val - columns.comments - columns.types string:string -#### A masked pattern was here #### - name default.t1 - numFiles 1 - numRows 6 - rawDataSize 24 - serialization.ddl struct t1 { string key, string val} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 30 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - SORTBUCKETCOLSPREFIX TRUE - bucket_count 2 - bucket_field_name key - columns key,val - columns.comments - columns.types string:string -#### A masked pattern was here #### - name default.t1 - numFiles 1 - numRows 6 - rawDataSize 24 - serialization.ddl struct t1 { string key, string val} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 30 -#### A masked pattern was here #### - serde: 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.t1 - name: default.t1 - Truncated Path -> Alias: - /t1 [t1] - Needs Tagging: false - Reduce Operator Tree: - Group By Operator - aggregations: count(VALUE._col0) - keys: KEY._col0 (type: string), KEY._col1 (type: double) - mode: mergepartial - outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: UDFToInteger(_col0) (type: int), UDFToInteger(_col1) (type: int), UDFToInteger(_col2) (type: int) - outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - GlobalTableId: 1 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE -#### A masked pattern was here #### - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key1,key2,cnt - columns.comments - columns.types int:int:int -#### A masked pattern was here #### - name default.outputtbl3 - numFiles 1 - numRows 5 - rawDataSize 25 - serialization.ddl struct outputtbl3 { i32 key1, i32 key2, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 30 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl3 - TotalFiles: 1 - GatherStats: true - MultiFileSpray: false - - Stage: Stage-0 - Move Operator - tables: - replace: true -#### A masked pattern was here #### - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key1,key2,cnt - columns.comments - columns.types int:int:int -#### A masked pattern was here #### - name default.outputtbl3 - numFiles 1 - numRows 5 - rawDataSize 25 - serialization.ddl struct outputtbl3 { i32 key1, i32 key2, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 30 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl3 - - Stage: Stage-2 - Stats-Aggr Operator -#### A masked pattern was here #### - -PREHOOK: query: INSERT OVERWRITE TABLE outputTbl3 -SELECT key, key + 1, count(1) FROM T1 GROUP BY key, key + 1 -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Output: default@outputtbl3 -POSTHOOK: query: INSERT OVERWRITE TABLE outputTbl3 -SELECT key, key + 1, count(1) FROM T1 GROUP BY key, key + 1 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Output: default@outputtbl3 -POSTHOOK: Lineage: outputtbl3.cnt EXPRESSION [(t1)t1.null, ] -POSTHOOK: Lineage: outputtbl3.key1 EXPRESSION [(t1)t1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: outputtbl3.key2 EXPRESSION [(t1)t1.FieldSchema(name:key, type:string, comment:null), ] -PREHOOK: query: SELECT * FROM outputTbl3 -PREHOOK: type: QUERY -PREHOOK: Input: default@outputtbl3 -#### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM outputTbl3 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@outputtbl3 -#### A masked pattern was here #### -1 2 1 -2 3 1 -3 4 1 -7 8 1 -8 9 2 -PREHOOK: query: -- it should not 
matter what follows the group by --- test various cases - --- group by followed by another group by -EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl1 -SELECT key + key, sum(cnt) from -(SELECT key, count(1) as cnt FROM T1 GROUP BY key) subq1 -group by key + key -PREHOOK: type: QUERY -POSTHOOK: query: -- it should not matter what follows the group by --- test various cases - --- group by followed by another group by -EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl1 -SELECT key + key, sum(cnt) from -(SELECT key, count(1) as cnt FROM T1 GROUP BY key) subq1 -group by key + key -POSTHOOK: type: QUERY -ABSTRACT SYNTAX TREE: - -TOK_QUERY - TOK_FROM - TOK_SUBQUERY - TOK_QUERY - TOK_FROM - TOK_TABREF - TOK_TABNAME - T1 - TOK_INSERT - TOK_DESTINATION - TOK_DIR - TOK_TMP_FILE - TOK_SELECT - TOK_SELEXPR - TOK_TABLE_OR_COL - key - TOK_SELEXPR - TOK_FUNCTION - count - 1 - cnt - TOK_GROUPBY - TOK_TABLE_OR_COL - key - subq1 - TOK_INSERT - TOK_DESTINATION - TOK_TAB - TOK_TABNAME - outputTbl1 - TOK_SELECT - TOK_SELEXPR - + - TOK_TABLE_OR_COL - key - TOK_TABLE_OR_COL - key - TOK_SELEXPR - TOK_FUNCTION - sum - TOK_TABLE_OR_COL - cnt - TOK_GROUPBY - + - TOK_TABLE_OR_COL - key - TOK_TABLE_OR_COL - key - - -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - Stage-2 depends on stages: Stage-0 - -STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: t1 - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - GatherStats: false - Select Operator - expressions: key (type: string) - outputColumnNames: key - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - Group By Operator - aggregations: count(1) - keys: key (type: string) - mode: final - outputColumnNames: _col0, _col1 - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: _col0 (type: string), _col1 (type: bigint) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE - Group By Operator - aggregations: sum(_col1) - keys: (_col0 + _col0) (type: double) - mode: hash - outputColumnNames: _col0, _col1 - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: _col0 (type: double) - sort order: + - Map-reduce partition columns: _col0 (type: double) - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE - tag: -1 - value expressions: _col1 (type: bigint) - auto parallelism: false - Path -> Alias: -#### A masked pattern was here #### - Path -> Partition: -#### A masked pattern was here #### - Partition - base file name: t1 - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - SORTBUCKETCOLSPREFIX TRUE - bucket_count 2 - bucket_field_name key - columns key,val - columns.comments - columns.types string:string -#### A masked pattern was here #### - name default.t1 - numFiles 1 - numRows 6 - rawDataSize 24 - serialization.ddl struct t1 { string key, string val} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 30 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - 
COLUMN_STATS_ACCURATE true - SORTBUCKETCOLSPREFIX TRUE - bucket_count 2 - bucket_field_name key - columns key,val - columns.comments - columns.types string:string -#### A masked pattern was here #### - name default.t1 - numFiles 1 - numRows 6 - rawDataSize 24 - serialization.ddl struct t1 { string key, string val} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 30 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.t1 - name: default.t1 - Truncated Path -> Alias: - /t1 [subq1:t1] - Needs Tagging: false - Reduce Operator Tree: - Group By Operator - aggregations: sum(VALUE._col0) - keys: KEY._col0 (type: double) - mode: mergepartial - outputColumnNames: _col0, _col1 - Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: UDFToInteger(_col0) (type: int), UDFToInteger(_col1) (type: int) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - GlobalTableId: 1 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE -#### A masked pattern was here #### - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key,cnt - columns.comments - columns.types int:int -#### A masked pattern was here #### - name default.outputtbl1 - numFiles 1 - numRows 5 - rawDataSize 15 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 20 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 - TotalFiles: 1 - GatherStats: true - MultiFileSpray: false - - Stage: Stage-0 - Move Operator - tables: - replace: true -#### A masked pattern was here #### - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key,cnt - columns.comments - columns.types int:int -#### A masked pattern was here #### - name default.outputtbl1 - numFiles 1 - numRows 5 - rawDataSize 15 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 20 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 - - Stage: Stage-2 - Stats-Aggr Operator -#### A masked pattern was here #### - -PREHOOK: query: INSERT OVERWRITE TABLE outputTbl1 -SELECT key + key, sum(cnt) from -(SELECT key, count(1) as cnt FROM T1 GROUP BY key) subq1 -group by key + key -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Output: default@outputtbl1 -POSTHOOK: query: INSERT OVERWRITE TABLE outputTbl1 -SELECT key + key, sum(cnt) from -(SELECT key, count(1) as cnt FROM T1 GROUP BY key) subq1 -group by key + key -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Output: default@outputtbl1 -POSTHOOK: Lineage: outputtbl1.cnt EXPRESSION [(t1)t1.null, ] -POSTHOOK: Lineage: outputtbl1.key EXPRESSION [(t1)t1.FieldSchema(name:key, type:string, 
comment:null), ] -PREHOOK: query: SELECT * FROM outputTbl1 -PREHOOK: type: QUERY -PREHOOK: Input: default@outputtbl1 -#### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM outputTbl1 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@outputtbl1 -#### A masked pattern was here #### -14 1 -16 2 -2 1 -4 1 -6 1 -PREHOOK: query: -- group by followed by a union -EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl1 -SELECT * FROM ( -SELECT key, count(1) FROM T1 GROUP BY key - UNION ALL -SELECT key, count(1) FROM T1 GROUP BY key -) subq1 -PREHOOK: type: QUERY -POSTHOOK: query: -- group by followed by a union -EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl1 -SELECT * FROM ( -SELECT key, count(1) FROM T1 GROUP BY key - UNION ALL -SELECT key, count(1) FROM T1 GROUP BY key -) subq1 -POSTHOOK: type: QUERY -ABSTRACT SYNTAX TREE: - -TOK_QUERY - TOK_FROM - TOK_SUBQUERY - TOK_UNION - TOK_QUERY - TOK_FROM - TOK_TABREF - TOK_TABNAME - T1 - TOK_INSERT - TOK_DESTINATION - TOK_DIR - TOK_TMP_FILE - TOK_SELECT - TOK_SELEXPR - TOK_TABLE_OR_COL - key - TOK_SELEXPR - TOK_FUNCTION - count - 1 - TOK_GROUPBY - TOK_TABLE_OR_COL - key - TOK_QUERY - TOK_FROM - TOK_TABREF - TOK_TABNAME - T1 - TOK_INSERT - TOK_DESTINATION - TOK_DIR - TOK_TMP_FILE - TOK_SELECT - TOK_SELEXPR - TOK_TABLE_OR_COL - key - TOK_SELEXPR - TOK_FUNCTION - count - 1 - TOK_GROUPBY - TOK_TABLE_OR_COL - key - subq1 - TOK_INSERT - TOK_DESTINATION - TOK_TAB - TOK_TABNAME - outputTbl1 - TOK_SELECT - TOK_SELEXPR - TOK_ALLCOLREF - - -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5 - Stage-4 - Stage-0 depends on stages: Stage-4, Stage-3, Stage-6 - Stage-2 depends on stages: Stage-0 - Stage-3 - Stage-5 - Stage-6 depends on stages: Stage-5 - -STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: t1 - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - GatherStats: false - Select Operator - expressions: key (type: string) - outputColumnNames: key - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - Group By Operator - aggregations: count(1) - keys: key (type: string) - mode: final - outputColumnNames: _col0, _col1 - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: _col0 (type: string), _col1 (type: bigint) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE - Union - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: UDFToInteger(_col0) (type: int), UDFToInteger(_col1) (type: int) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - GlobalTableId: 1 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE -#### A masked pattern was here #### - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key,cnt - columns.comments - columns.types int:int -#### A masked pattern was here #### - name default.outputtbl1 - numFiles 1 - numRows 5 - rawDataSize 17 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} - serialization.format 1 - serialization.lib 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 22 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 - TotalFiles: 1 - GatherStats: true - MultiFileSpray: false - TableScan - alias: t1 - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - GatherStats: false - Select Operator - expressions: key (type: string) - outputColumnNames: key - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - Group By Operator - aggregations: count(1) - keys: key (type: string) - mode: final - outputColumnNames: _col0, _col1 - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: _col0 (type: string), _col1 (type: bigint) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE - Union - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: UDFToInteger(_col0) (type: int), UDFToInteger(_col1) (type: int) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - GlobalTableId: 1 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE -#### A masked pattern was here #### - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key,cnt - columns.comments - columns.types int:int -#### A masked pattern was here #### - name default.outputtbl1 - numFiles 1 - numRows 5 - rawDataSize 17 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 22 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 - TotalFiles: 1 - GatherStats: true - MultiFileSpray: false - Path -> Alias: -#### A masked pattern was here #### - Path -> Partition: -#### A masked pattern was here #### - Partition - base file name: t1 - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - SORTBUCKETCOLSPREFIX TRUE - bucket_count 2 - bucket_field_name key - columns key,val - columns.comments - columns.types string:string -#### A masked pattern was here #### - name default.t1 - numFiles 1 - numRows 6 - rawDataSize 24 - serialization.ddl struct t1 { string key, string val} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 30 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - SORTBUCKETCOLSPREFIX TRUE - bucket_count 2 - bucket_field_name key - columns key,val - columns.comments - columns.types string:string -#### A masked pattern was here #### - name default.t1 - numFiles 1 - numRows 6 - rawDataSize 24 - serialization.ddl struct t1 { string key, string val} - serialization.format 1 - serialization.lib 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 30 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.t1 - name: default.t1 - Truncated Path -> Alias: - /t1 [null-subquery1:subq1-subquery1:t1, null-subquery2:subq1-subquery2:t1] - - Stage: Stage-7 - Conditional Operator - - Stage: Stage-4 - Move Operator - files: - hdfs directory: true -#### A masked pattern was here #### - - Stage: Stage-0 - Move Operator - tables: - replace: true -#### A masked pattern was here #### - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key,cnt - columns.comments - columns.types int:int -#### A masked pattern was here #### - name default.outputtbl1 - numFiles 1 - numRows 5 - rawDataSize 17 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 22 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 - - Stage: Stage-2 - Stats-Aggr Operator -#### A masked pattern was here #### - - Stage: Stage-3 - Map Reduce - Map Operator Tree: - TableScan - GatherStats: false - File Output Operator - compressed: false - GlobalTableId: 0 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key,cnt - columns.comments - columns.types int:int -#### A masked pattern was here #### - name default.outputtbl1 - numFiles 1 - numRows 5 - rawDataSize 17 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 22 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 - TotalFiles: 1 - GatherStats: false - MultiFileSpray: false - Path -> Alias: -#### A masked pattern was here #### - Path -> Partition: -#### A masked pattern was here #### - Partition - base file name: -ext-10001 - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key,cnt - columns.comments - columns.types int:int -#### A masked pattern was here #### - name default.outputtbl1 - numFiles 1 - numRows 5 - rawDataSize 17 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 22 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key,cnt - columns.comments - columns.types int:int -#### A masked pattern was here #### - name default.outputtbl1 - numFiles 1 - numRows 5 - rawDataSize 17 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 22 -#### A masked pattern 
was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 - name: default.outputtbl1 - Truncated Path -> Alias: -#### A masked pattern was here #### - - Stage: Stage-5 - Map Reduce - Map Operator Tree: - TableScan - GatherStats: false - File Output Operator - compressed: false - GlobalTableId: 0 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key,cnt - columns.comments - columns.types int:int -#### A masked pattern was here #### - name default.outputtbl1 - numFiles 1 - numRows 5 - rawDataSize 17 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 22 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 - TotalFiles: 1 - GatherStats: false - MultiFileSpray: false - Path -> Alias: -#### A masked pattern was here #### - Path -> Partition: -#### A masked pattern was here #### - Partition - base file name: -ext-10001 - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key,cnt - columns.comments - columns.types int:int -#### A masked pattern was here #### - name default.outputtbl1 - numFiles 1 - numRows 5 - rawDataSize 17 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 22 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key,cnt - columns.comments - columns.types int:int -#### A masked pattern was here #### - name default.outputtbl1 - numFiles 1 - numRows 5 - rawDataSize 17 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 22 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 - name: default.outputtbl1 - Truncated Path -> Alias: -#### A masked pattern was here #### - - Stage: Stage-6 - Move Operator - files: - hdfs directory: true -#### A masked pattern was here #### - -PREHOOK: query: INSERT OVERWRITE TABLE outputTbl1 -SELECT * FROM ( -SELECT key, count(1) FROM T1 GROUP BY key - UNION ALL -SELECT key, count(1) FROM T1 GROUP BY key -) subq1 -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Output: default@outputtbl1 -POSTHOOK: query: INSERT OVERWRITE TABLE outputTbl1 -SELECT * FROM ( -SELECT key, count(1) FROM T1 GROUP BY key - UNION ALL -SELECT key, count(1) FROM T1 GROUP BY key -) subq1 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Output: default@outputtbl1 -POSTHOOK: Lineage: outputtbl1.cnt EXPRESSION [(t1)t1.null, (t1)t1.null, ] -POSTHOOK: Lineage: outputtbl1.key EXPRESSION [(t1)t1.FieldSchema(name:key, type:string, comment:null), (t1)t1.FieldSchema(name:key, type:string, comment:null), ] -PREHOOK: query: SELECT * FROM 
outputTbl1 -PREHOOK: type: QUERY -PREHOOK: Input: default@outputtbl1 -#### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM outputTbl1 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@outputtbl1 -#### A masked pattern was here #### -1 1 -1 1 -2 1 -2 1 -3 1 -3 1 -7 1 -7 1 -8 2 -8 2 -PREHOOK: query: -- group by followed by a union where one of the sub-queries is map-side group by -EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl1 -SELECT * FROM ( -SELECT key, count(1) FROM T1 GROUP BY key - UNION ALL -SELECT key + key as key, count(1) FROM T1 GROUP BY key + key -) subq1 -PREHOOK: type: QUERY -POSTHOOK: query: -- group by followed by a union where one of the sub-queries is map-side group by -EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl1 -SELECT * FROM ( -SELECT key, count(1) FROM T1 GROUP BY key - UNION ALL -SELECT key + key as key, count(1) FROM T1 GROUP BY key + key -) subq1 -POSTHOOK: type: QUERY -ABSTRACT SYNTAX TREE: - -TOK_QUERY - TOK_FROM - TOK_SUBQUERY - TOK_UNION - TOK_QUERY - TOK_FROM - TOK_TABREF - TOK_TABNAME - T1 - TOK_INSERT - TOK_DESTINATION - TOK_DIR - TOK_TMP_FILE - TOK_SELECT - TOK_SELEXPR - TOK_TABLE_OR_COL - key - TOK_SELEXPR - TOK_FUNCTION - count - 1 - TOK_GROUPBY - TOK_TABLE_OR_COL - key - TOK_QUERY - TOK_FROM - TOK_TABREF - TOK_TABNAME - T1 - TOK_INSERT - TOK_DESTINATION - TOK_DIR - TOK_TMP_FILE - TOK_SELECT - TOK_SELEXPR - + - TOK_TABLE_OR_COL - key - TOK_TABLE_OR_COL - key - key - TOK_SELEXPR - TOK_FUNCTION - count - 1 - TOK_GROUPBY - + - TOK_TABLE_OR_COL - key - TOK_TABLE_OR_COL - key - subq1 - TOK_INSERT - TOK_DESTINATION - TOK_TAB - TOK_TABNAME - outputTbl1 - TOK_SELECT - TOK_SELEXPR - TOK_ALLCOLREF - - -STAGE DEPENDENCIES: - Stage-9 is a root stage - Stage-2 depends on stages: Stage-9 - Stage-8 depends on stages: Stage-2 , consists of Stage-5, Stage-4, Stage-6 - Stage-5 - Stage-0 depends on stages: Stage-5, Stage-4, Stage-7 - Stage-3 depends on stages: Stage-0 - Stage-4 - Stage-6 - Stage-7 depends on stages: Stage-6 - -STAGE PLANS: - Stage: Stage-9 - Map Reduce - Map Operator Tree: - TableScan - alias: t1 - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - GatherStats: false - Select Operator - expressions: key (type: string) - outputColumnNames: key - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - Group By Operator - aggregations: count(1) - keys: (key + key) (type: double) - mode: hash - outputColumnNames: _col0, _col1 - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: _col0 (type: double) - sort order: + - Map-reduce partition columns: _col0 (type: double) - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - tag: -1 - value expressions: _col1 (type: bigint) - auto parallelism: false - Path -> Alias: -#### A masked pattern was here #### - Path -> Partition: -#### A masked pattern was here #### - Partition - base file name: t1 - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - SORTBUCKETCOLSPREFIX TRUE - bucket_count 2 - bucket_field_name key - columns key,val - columns.comments - columns.types string:string -#### A masked pattern was here #### - name default.t1 - numFiles 1 - numRows 6 - rawDataSize 24 - serialization.ddl struct t1 { string key, string val} - serialization.format 1 - serialization.lib 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 30 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - SORTBUCKETCOLSPREFIX TRUE - bucket_count 2 - bucket_field_name key - columns key,val - columns.comments - columns.types string:string -#### A masked pattern was here #### - name default.t1 - numFiles 1 - numRows 6 - rawDataSize 24 - serialization.ddl struct t1 { string key, string val} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 30 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.t1 - name: default.t1 - Truncated Path -> Alias: - /t1 [null-subquery2:subq1-subquery2:t1] - Needs Tagging: false - Reduce Operator Tree: - Group By Operator - aggregations: count(VALUE._col0) - keys: KEY._col0 (type: double) - mode: mergepartial - outputColumnNames: _col0, _col1 - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: _col0 (type: double), _col1 (type: bigint) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - GlobalTableId: 0 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - properties: - columns _col0,_col1 - columns.types double,bigint - escape.delim \ - serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - TotalFiles: 1 - GatherStats: false - MultiFileSpray: false - - Stage: Stage-2 - Map Reduce - Map Operator Tree: - TableScan - alias: t1 - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - GatherStats: false - Select Operator - expressions: key (type: string) - outputColumnNames: key - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - Group By Operator - aggregations: count(1) - keys: key (type: string) - mode: final - outputColumnNames: _col0, _col1 - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: UDFToDouble(_col0) (type: double), _col1 (type: bigint) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE - Union - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: UDFToInteger(_col0) (type: int), UDFToInteger(_col1) (type: int) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - GlobalTableId: 1 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE -#### A masked pattern was here #### - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key,cnt - columns.comments - columns.types int:int -#### A masked pattern was here #### 
- name default.outputtbl1 - numFiles 1 - numRows 10 - rawDataSize 30 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 40 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 - TotalFiles: 1 - GatherStats: true - MultiFileSpray: false - TableScan - GatherStats: false - Union - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: UDFToInteger(_col0) (type: int), UDFToInteger(_col1) (type: int) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - GlobalTableId: 1 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE -#### A masked pattern was here #### - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key,cnt - columns.comments - columns.types int:int -#### A masked pattern was here #### - name default.outputtbl1 - numFiles 1 - numRows 10 - rawDataSize 30 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 40 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 - TotalFiles: 1 - GatherStats: true - MultiFileSpray: false - Path -> Alias: -#### A masked pattern was here #### - Path -> Partition: -#### A masked pattern was here #### - Partition - base file name: -mr-10002 - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - properties: - columns _col0,_col1 - columns.types double,bigint - escape.delim \ - serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - properties: - columns _col0,_col1 - columns.types double,bigint - escape.delim \ - serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe -#### A masked pattern was here #### - Partition - base file name: t1 - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - SORTBUCKETCOLSPREFIX TRUE - bucket_count 2 - bucket_field_name key - columns key,val - columns.comments - columns.types string:string -#### A masked pattern was here #### - name default.t1 - numFiles 1 - numRows 6 - rawDataSize 24 - serialization.ddl struct t1 { string key, string val} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 30 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - SORTBUCKETCOLSPREFIX 
TRUE - bucket_count 2 - bucket_field_name key - columns key,val - columns.comments - columns.types string:string -#### A masked pattern was here #### - name default.t1 - numFiles 1 - numRows 6 - rawDataSize 24 - serialization.ddl struct t1 { string key, string val} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 30 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.t1 - name: default.t1 - Truncated Path -> Alias: - /t1 [null-subquery1:subq1-subquery1:t1] -#### A masked pattern was here #### - - Stage: Stage-8 - Conditional Operator - - Stage: Stage-5 - Move Operator - files: - hdfs directory: true -#### A masked pattern was here #### - - Stage: Stage-0 - Move Operator - tables: - replace: true -#### A masked pattern was here #### - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key,cnt - columns.comments - columns.types int:int -#### A masked pattern was here #### - name default.outputtbl1 - numFiles 1 - numRows 10 - rawDataSize 30 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 40 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 - - Stage: Stage-3 - Stats-Aggr Operator -#### A masked pattern was here #### - - Stage: Stage-4 - Map Reduce - Map Operator Tree: - TableScan - GatherStats: false - File Output Operator - compressed: false - GlobalTableId: 0 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key,cnt - columns.comments - columns.types int:int -#### A masked pattern was here #### - name default.outputtbl1 - numFiles 1 - numRows 10 - rawDataSize 30 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 40 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 - TotalFiles: 1 - GatherStats: false - MultiFileSpray: false - Path -> Alias: -#### A masked pattern was here #### - Path -> Partition: -#### A masked pattern was here #### - Partition - base file name: -ext-10001 - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key,cnt - columns.comments - columns.types int:int -#### A masked pattern was here #### - name default.outputtbl1 - numFiles 1 - numRows 10 - rawDataSize 30 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 40 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key,cnt - columns.comments - 
columns.types int:int -#### A masked pattern was here #### - name default.outputtbl1 - numFiles 1 - numRows 10 - rawDataSize 30 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 40 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 - name: default.outputtbl1 - Truncated Path -> Alias: -#### A masked pattern was here #### - - Stage: Stage-6 - Map Reduce - Map Operator Tree: - TableScan - GatherStats: false - File Output Operator - compressed: false - GlobalTableId: 0 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key,cnt - columns.comments - columns.types int:int -#### A masked pattern was here #### - name default.outputtbl1 - numFiles 1 - numRows 10 - rawDataSize 30 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 40 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 - TotalFiles: 1 - GatherStats: false - MultiFileSpray: false - Path -> Alias: -#### A masked pattern was here #### - Path -> Partition: -#### A masked pattern was here #### - Partition - base file name: -ext-10001 - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key,cnt - columns.comments - columns.types int:int -#### A masked pattern was here #### - name default.outputtbl1 - numFiles 1 - numRows 10 - rawDataSize 30 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 40 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key,cnt - columns.comments - columns.types int:int -#### A masked pattern was here #### - name default.outputtbl1 - numFiles 1 - numRows 10 - rawDataSize 30 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 40 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 - name: default.outputtbl1 - Truncated Path -> Alias: -#### A masked pattern was here #### - - Stage: Stage-7 - Move Operator - files: - hdfs directory: true -#### A masked pattern was here #### - -PREHOOK: query: INSERT OVERWRITE TABLE outputTbl1 -SELECT * FROM ( -SELECT key, count(1) as cnt FROM T1 GROUP BY key - UNION ALL -SELECT key + key as key, count(1) as cnt FROM T1 GROUP BY key + key -) subq1 -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Output: default@outputtbl1 -POSTHOOK: query: INSERT OVERWRITE TABLE outputTbl1 -SELECT * FROM ( -SELECT key, count(1) as cnt FROM T1 GROUP BY key - UNION ALL -SELECT key + key as key, count(1) as cnt FROM T1 
GROUP BY key + key -) subq1 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Output: default@outputtbl1 -POSTHOOK: Lineage: outputtbl1.cnt EXPRESSION [(t1)t1.null, (t1)t1.null, ] -POSTHOOK: Lineage: outputtbl1.key EXPRESSION [(t1)t1.FieldSchema(name:key, type:string, comment:null), (t1)t1.FieldSchema(name:key, type:string, comment:null), ] -PREHOOK: query: SELECT * FROM outputTbl1 -PREHOOK: type: QUERY -PREHOOK: Input: default@outputtbl1 -#### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM outputTbl1 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@outputtbl1 -#### A masked pattern was here #### -1 1 -14 1 -16 2 -2 1 -2 1 -3 1 -4 1 -6 1 -7 1 -8 2 -PREHOOK: query: -- group by followed by a join -EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl1 -SELECT subq1.key, subq1.cnt+subq2.cnt FROM -(SELECT key, count(1) as cnt FROM T1 GROUP BY key) subq1 -JOIN -(SELECT key, count(1) as cnt FROM T1 GROUP BY key) subq2 -ON subq1.key = subq2.key -PREHOOK: type: QUERY -POSTHOOK: query: -- group by followed by a join -EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl1 -SELECT subq1.key, subq1.cnt+subq2.cnt FROM -(SELECT key, count(1) as cnt FROM T1 GROUP BY key) subq1 -JOIN -(SELECT key, count(1) as cnt FROM T1 GROUP BY key) subq2 -ON subq1.key = subq2.key -POSTHOOK: type: QUERY -ABSTRACT SYNTAX TREE: - -TOK_QUERY - TOK_FROM - TOK_JOIN - TOK_SUBQUERY - TOK_QUERY - TOK_FROM - TOK_TABREF - TOK_TABNAME - T1 - TOK_INSERT - TOK_DESTINATION - TOK_DIR - TOK_TMP_FILE - TOK_SELECT - TOK_SELEXPR - TOK_TABLE_OR_COL - key - TOK_SELEXPR - TOK_FUNCTION - count - 1 - cnt - TOK_GROUPBY - TOK_TABLE_OR_COL - key - subq1 - TOK_SUBQUERY - TOK_QUERY - TOK_FROM - TOK_TABREF - TOK_TABNAME - T1 - TOK_INSERT - TOK_DESTINATION - TOK_DIR - TOK_TMP_FILE - TOK_SELECT - TOK_SELEXPR - TOK_TABLE_OR_COL - key - TOK_SELEXPR - TOK_FUNCTION - count - 1 - cnt - TOK_GROUPBY - TOK_TABLE_OR_COL - key - subq2 - = - . - TOK_TABLE_OR_COL - subq1 - key - . - TOK_TABLE_OR_COL - subq2 - key - TOK_INSERT - TOK_DESTINATION - TOK_TAB - TOK_TABNAME - outputTbl1 - TOK_SELECT - TOK_SELEXPR - . - TOK_TABLE_OR_COL - subq1 - key - TOK_SELEXPR - + - . - TOK_TABLE_OR_COL - subq1 - cnt - . 
- TOK_TABLE_OR_COL - subq2 - cnt - - -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - Stage-2 depends on stages: Stage-0 - -STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: t1 - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - GatherStats: false - Filter Operator - isSamplingPred: false - predicate: key is not null (type: boolean) - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: key (type: string) - outputColumnNames: key - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE - Group By Operator - aggregations: count(1) - keys: key (type: string) - mode: final - outputColumnNames: _col0, _col1 - Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: _col0 (type: string), _col1 (type: bigint) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: _col0 (type: string) - sort order: + - Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE - tag: 0 - value expressions: _col1 (type: bigint) - auto parallelism: false - TableScan - alias: t1 - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - GatherStats: false - Filter Operator - isSamplingPred: false - predicate: key is not null (type: boolean) - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: key (type: string) - outputColumnNames: key - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE - Group By Operator - aggregations: count(1) - keys: key (type: string) - mode: final - outputColumnNames: _col0, _col1 - Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: _col0 (type: string), _col1 (type: bigint) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: _col0 (type: string) - sort order: + - Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE - tag: 1 - value expressions: _col1 (type: bigint) - auto parallelism: false - Path -> Alias: -#### A masked pattern was here #### - Path -> Partition: -#### A masked pattern was here #### - Partition - base file name: t1 - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - SORTBUCKETCOLSPREFIX TRUE - bucket_count 2 - bucket_field_name key - columns key,val - columns.comments - columns.types string:string -#### A masked pattern was here #### - name default.t1 - numFiles 1 - numRows 6 - rawDataSize 24 - serialization.ddl struct t1 { string key, string val} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 30 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - SORTBUCKETCOLSPREFIX TRUE - bucket_count 2 - bucket_field_name 
key - columns key,val - columns.comments - columns.types string:string -#### A masked pattern was here #### - name default.t1 - numFiles 1 - numRows 6 - rawDataSize 24 - serialization.ddl struct t1 { string key, string val} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 30 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.t1 - name: default.t1 - Truncated Path -> Alias: - /t1 [subq1:t1, subq2:t1] - Needs Tagging: true - Reduce Operator Tree: - Join Operator - condition map: - Inner Join 0 to 1 - condition expressions: - 0 {KEY.reducesinkkey0} {VALUE._col0} - 1 {VALUE._col0} - outputColumnNames: _col0, _col1, _col3 - Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: UDFToInteger(_col0) (type: int), UDFToInteger((_col1 + _col3)) (type: int) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - GlobalTableId: 1 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE -#### A masked pattern was here #### - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key,cnt - columns.comments - columns.types int:int -#### A masked pattern was here #### - name default.outputtbl1 - numFiles 1 - numRows 10 - rawDataSize 32 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 42 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 - TotalFiles: 1 - GatherStats: true - MultiFileSpray: false - - Stage: Stage-0 - Move Operator - tables: - replace: true -#### A masked pattern was here #### - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key,cnt - columns.comments - columns.types int:int -#### A masked pattern was here #### - name default.outputtbl1 - numFiles 1 - numRows 10 - rawDataSize 32 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 42 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 - - Stage: Stage-2 - Stats-Aggr Operator -#### A masked pattern was here #### - -PREHOOK: query: INSERT OVERWRITE TABLE outputTbl1 -SELECT subq1.key, subq1.cnt+subq2.cnt FROM -(SELECT key, count(1) as cnt FROM T1 GROUP BY key) subq1 -JOIN -(SELECT key, count(1) as cnt FROM T1 GROUP BY key) subq2 -ON subq1.key = subq2.key -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Output: default@outputtbl1 -POSTHOOK: query: INSERT OVERWRITE TABLE outputTbl1 -SELECT subq1.key, subq1.cnt+subq2.cnt FROM -(SELECT key, count(1) as cnt FROM T1 GROUP BY key) subq1 -JOIN -(SELECT key, count(1) as cnt FROM T1 GROUP BY key) subq2 -ON subq1.key = subq2.key -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Output: default@outputtbl1 -POSTHOOK: Lineage: outputtbl1.cnt 
EXPRESSION [(t1)t1.null, (t1)t1.null, ] -POSTHOOK: Lineage: outputtbl1.key EXPRESSION [(t1)t1.FieldSchema(name:key, type:string, comment:null), ] -PREHOOK: query: SELECT * FROM outputTbl1 -PREHOOK: type: QUERY -PREHOOK: Input: default@outputtbl1 -#### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM outputTbl1 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@outputtbl1 -#### A masked pattern was here #### -1 2 -2 2 -3 2 -7 2 -8 4 -PREHOOK: query: -- group by followed by a join where one of the sub-queries can be performed in the mapper -EXPLAIN EXTENDED -SELECT * FROM -(SELECT key, count(1) FROM T1 GROUP BY key) subq1 -JOIN -(SELECT key, val, count(1) FROM T1 GROUP BY key, val) subq2 -ON subq1.key = subq2.key -PREHOOK: type: QUERY -POSTHOOK: query: -- group by followed by a join where one of the sub-queries can be performed in the mapper -EXPLAIN EXTENDED -SELECT * FROM -(SELECT key, count(1) FROM T1 GROUP BY key) subq1 -JOIN -(SELECT key, val, count(1) FROM T1 GROUP BY key, val) subq2 -ON subq1.key = subq2.key -POSTHOOK: type: QUERY -ABSTRACT SYNTAX TREE: - -TOK_QUERY - TOK_FROM - TOK_JOIN - TOK_SUBQUERY - TOK_QUERY - TOK_FROM - TOK_TABREF - TOK_TABNAME - T1 - TOK_INSERT - TOK_DESTINATION - TOK_DIR - TOK_TMP_FILE - TOK_SELECT - TOK_SELEXPR - TOK_TABLE_OR_COL - key - TOK_SELEXPR - TOK_FUNCTION - count - 1 - TOK_GROUPBY - TOK_TABLE_OR_COL - key - subq1 - TOK_SUBQUERY - TOK_QUERY - TOK_FROM - TOK_TABREF - TOK_TABNAME - T1 - TOK_INSERT - TOK_DESTINATION - TOK_DIR - TOK_TMP_FILE - TOK_SELECT - TOK_SELEXPR - TOK_TABLE_OR_COL - key - TOK_SELEXPR - TOK_TABLE_OR_COL - val - TOK_SELEXPR - TOK_FUNCTION - count - 1 - TOK_GROUPBY - TOK_TABLE_OR_COL - key - TOK_TABLE_OR_COL - val - subq2 - = - . - TOK_TABLE_OR_COL - subq1 - key - . - TOK_TABLE_OR_COL - subq2 - key - TOK_INSERT - TOK_DESTINATION - TOK_DIR - TOK_TMP_FILE - TOK_SELECT - TOK_SELEXPR - TOK_ALLCOLREF - - -STAGE DEPENDENCIES: - Stage-2 is a root stage - Stage-1 depends on stages: Stage-2 - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-2 - Map Reduce - Map Operator Tree: - TableScan - alias: t1 - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - GatherStats: false - Filter Operator - isSamplingPred: false - predicate: key is not null (type: boolean) - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: key (type: string), val (type: string) - outputColumnNames: key, val - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE - Group By Operator - aggregations: count(1) - keys: key (type: string), val (type: string) - mode: hash - outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: _col0 (type: string), _col1 (type: string) - sort order: ++ - Map-reduce partition columns: _col0 (type: string), _col1 (type: string) - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE - tag: -1 - value expressions: _col2 (type: bigint) - auto parallelism: false - Path -> Alias: -#### A masked pattern was here #### - Path -> Partition: -#### A masked pattern was here #### - Partition - base file name: t1 - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - SORTBUCKETCOLSPREFIX TRUE - bucket_count 2 - bucket_field_name key - columns key,val - 
columns.comments - columns.types string:string -#### A masked pattern was here #### - name default.t1 - numFiles 1 - numRows 6 - rawDataSize 24 - serialization.ddl struct t1 { string key, string val} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 30 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - SORTBUCKETCOLSPREFIX TRUE - bucket_count 2 - bucket_field_name key - columns key,val - columns.comments - columns.types string:string -#### A masked pattern was here #### - name default.t1 - numFiles 1 - numRows 6 - rawDataSize 24 - serialization.ddl struct t1 { string key, string val} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 30 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.t1 - name: default.t1 - Truncated Path -> Alias: - /t1 [subq2:t1] - Needs Tagging: false - Reduce Operator Tree: - Group By Operator - aggregations: count(VALUE._col0) - keys: KEY._col0 (type: string), KEY._col1 (type: string) - mode: mergepartial - outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col2 (type: bigint) - outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - GlobalTableId: 0 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - properties: - columns _col0,_col1,_col2 - columns.types string,string,bigint - escape.delim \ - serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - TotalFiles: 1 - GatherStats: false - MultiFileSpray: false - - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: t1 - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - GatherStats: false - Filter Operator - isSamplingPred: false - predicate: key is not null (type: boolean) - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: key (type: string) - outputColumnNames: key - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE - Group By Operator - aggregations: count(1) - keys: key (type: string) - mode: final - outputColumnNames: _col0, _col1 - Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: _col0 (type: string), _col1 (type: bigint) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: _col0 (type: string) - sort order: + - Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE - tag: 0 - value expressions: _col1 (type: bigint) - auto parallelism: false - TableScan - GatherStats: false - Reduce Output Operator - key expressions: _col0 (type: string) - sort 
order: + - Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE - tag: 1 - value expressions: _col1 (type: string), _col2 (type: bigint) - auto parallelism: false - Path -> Alias: -#### A masked pattern was here #### - Path -> Partition: -#### A masked pattern was here #### - Partition - base file name: -mr-10002 - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - properties: - columns _col0,_col1,_col2 - columns.types string,string,bigint - escape.delim \ - serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - properties: - columns _col0,_col1,_col2 - columns.types string,string,bigint - escape.delim \ - serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe -#### A masked pattern was here #### - Partition - base file name: t1 - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - SORTBUCKETCOLSPREFIX TRUE - bucket_count 2 - bucket_field_name key - columns key,val - columns.comments - columns.types string:string -#### A masked pattern was here #### - name default.t1 - numFiles 1 - numRows 6 - rawDataSize 24 - serialization.ddl struct t1 { string key, string val} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 30 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - SORTBUCKETCOLSPREFIX TRUE - bucket_count 2 - bucket_field_name key - columns key,val - columns.comments - columns.types string:string -#### A masked pattern was here #### - name default.t1 - numFiles 1 - numRows 6 - rawDataSize 24 - serialization.ddl struct t1 { string key, string val} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 30 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.t1 - name: default.t1 - Truncated Path -> Alias: - /t1 [subq1:t1] -#### A masked pattern was here #### - Needs Tagging: true - Reduce Operator Tree: - Join Operator - condition map: - Inner Join 0 to 1 - condition expressions: - 0 {KEY.reducesinkkey0} {VALUE._col0} - 1 {KEY.reducesinkkey0} {VALUE._col0} {VALUE._col1} - outputColumnNames: _col0, _col1, _col2, _col3, _col4 - Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: _col0 (type: string), _col1 (type: bigint), _col2 (type: string), _col3 (type: string), _col4 (type: bigint) - outputColumnNames: _col0, _col1, _col2, _col3, _col4 - Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - GlobalTableId: 0 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE -#### A masked pattern was here 
#### - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - columns _col0,_col1,_col2,_col3,_col4 - columns.types string:bigint:string:string:bigint - escape.delim \ - hive.serialization.extend.additional.nesting.levels true - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - TotalFiles: 1 - GatherStats: false - MultiFileSpray: false - - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - ListSink - -PREHOOK: query: CREATE TABLE T2(key STRING, val STRING) -CLUSTERED BY (key, val) SORTED BY (key, val) INTO 2 BUCKETS STORED AS TEXTFILE -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -POSTHOOK: query: CREATE TABLE T2(key STRING, val STRING) -CLUSTERED BY (key, val) SORTED BY (key, val) INTO 2 BUCKETS STORED AS TEXTFILE -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@T2 -PREHOOK: query: -- perform an insert to make sure there are 2 files -INSERT OVERWRITE TABLE T2 select key, val from T1 -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Output: default@t2 -POSTHOOK: query: -- perform an insert to make sure there are 2 files -INSERT OVERWRITE TABLE T2 select key, val from T1 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Output: default@t2 -POSTHOOK: Lineage: t2.key SIMPLE [(t1)t1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: t2.val SIMPLE [(t1)t1.FieldSchema(name:val, type:string, comment:null), ] -PREHOOK: query: -- no mapside sort group by if the group by is a prefix of the sorted key -EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl1 -SELECT key, count(1) FROM T2 GROUP BY key -PREHOOK: type: QUERY -POSTHOOK: query: -- no mapside sort group by if the group by is a prefix of the sorted key -EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl1 -SELECT key, count(1) FROM T2 GROUP BY key -POSTHOOK: type: QUERY -ABSTRACT SYNTAX TREE: - -TOK_QUERY - TOK_FROM - TOK_TABREF - TOK_TABNAME - T2 - TOK_INSERT - TOK_DESTINATION - TOK_TAB - TOK_TABNAME - outputTbl1 - TOK_SELECT - TOK_SELEXPR - TOK_TABLE_OR_COL - key - TOK_SELEXPR - TOK_FUNCTION - count - 1 - TOK_GROUPBY - TOK_TABLE_OR_COL - key - - -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - Stage-2 depends on stages: Stage-0 - -STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: t2 - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - GatherStats: false - Select Operator - expressions: key (type: string) - outputColumnNames: key - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - Group By Operator - aggregations: count(1) - bucketGroup: true - keys: key (type: string) - mode: hash - outputColumnNames: _col0, _col1 - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: _col0 (type: string) - sort order: + - Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - tag: -1 - value expressions: _col1 (type: bigint) - auto parallelism: false - Path -> Alias: -#### A masked pattern was here #### - Path -> Partition: -#### A masked pattern was here #### - Partition - base file name: t2 - input format: org.apache.hadoop.mapred.TextInputFormat - output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - SORTBUCKETCOLSPREFIX TRUE - bucket_count 2 - bucket_field_name key - columns key,val - columns.comments - columns.types string:string -#### A masked pattern was here #### - name default.t2 - numFiles 1 - numRows 6 - rawDataSize 24 - serialization.ddl struct t2 { string key, string val} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 30 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - SORTBUCKETCOLSPREFIX TRUE - bucket_count 2 - bucket_field_name key - columns key,val - columns.comments - columns.types string:string -#### A masked pattern was here #### - name default.t2 - numFiles 1 - numRows 6 - rawDataSize 24 - serialization.ddl struct t2 { string key, string val} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 30 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.t2 - name: default.t2 - Truncated Path -> Alias: - /t2 [t2] - Needs Tagging: false - Reduce Operator Tree: - Group By Operator - aggregations: count(VALUE._col0) - keys: KEY._col0 (type: string) - mode: mergepartial - outputColumnNames: _col0, _col1 - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: UDFToInteger(_col0) (type: int), UDFToInteger(_col1) (type: int) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - GlobalTableId: 1 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE -#### A masked pattern was here #### - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key,cnt - columns.comments - columns.types int:int -#### A masked pattern was here #### - name default.outputtbl1 - numFiles 1 - numRows 5 - rawDataSize 15 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 20 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 - TotalFiles: 1 - GatherStats: true - MultiFileSpray: false - - Stage: Stage-0 - Move Operator - tables: - replace: true -#### A masked pattern was here #### - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key,cnt - columns.comments - columns.types int:int -#### A masked pattern was here #### - name default.outputtbl1 - numFiles 1 - numRows 5 - rawDataSize 15 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 20 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: 
default.outputtbl1 - - Stage: Stage-2 - Stats-Aggr Operator -#### A masked pattern was here #### - -PREHOOK: query: INSERT OVERWRITE TABLE outputTbl1 -SELECT key, count(1) FROM T2 GROUP BY key -PREHOOK: type: QUERY -PREHOOK: Input: default@t2 -PREHOOK: Output: default@outputtbl1 -POSTHOOK: query: INSERT OVERWRITE TABLE outputTbl1 -SELECT key, count(1) FROM T2 GROUP BY key -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t2 -POSTHOOK: Output: default@outputtbl1 -POSTHOOK: Lineage: outputtbl1.cnt EXPRESSION [(t2)t2.null, ] -POSTHOOK: Lineage: outputtbl1.key EXPRESSION [(t2)t2.FieldSchema(name:key, type:string, comment:null), ] -PREHOOK: query: SELECT * FROM outputTbl1 -PREHOOK: type: QUERY -PREHOOK: Input: default@outputtbl1 -#### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM outputTbl1 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@outputtbl1 -#### A masked pattern was here #### -1 1 -2 1 -3 1 -7 1 -8 2 -PREHOOK: query: -- The plan should be converted to a map-side group by if the group by key contains a constant in between the --- sorted keys -EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl4 -SELECT key, 1, val, count(1) FROM T2 GROUP BY key, 1, val -PREHOOK: type: QUERY -POSTHOOK: query: -- The plan should be converted to a map-side group by if the group by key contains a constant in between the --- sorted keys -EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl4 -SELECT key, 1, val, count(1) FROM T2 GROUP BY key, 1, val -POSTHOOK: type: QUERY -ABSTRACT SYNTAX TREE: - -TOK_QUERY - TOK_FROM - TOK_TABREF - TOK_TABNAME - T2 - TOK_INSERT - TOK_DESTINATION - TOK_TAB - TOK_TABNAME - outputTbl4 - TOK_SELECT - TOK_SELEXPR - TOK_TABLE_OR_COL - key - TOK_SELEXPR - 1 - TOK_SELEXPR - TOK_TABLE_OR_COL - val - TOK_SELEXPR - TOK_FUNCTION - count - 1 - TOK_GROUPBY - TOK_TABLE_OR_COL - key - 1 - TOK_TABLE_OR_COL - val - - -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5 - Stage-4 - Stage-0 depends on stages: Stage-4, Stage-3, Stage-6 - Stage-2 depends on stages: Stage-0 - Stage-3 - Stage-5 - Stage-6 depends on stages: Stage-5 - -STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: t2 - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - GatherStats: false - Select Operator - expressions: key (type: string), val (type: string) - outputColumnNames: key, val - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - Group By Operator - aggregations: count(1) - keys: key (type: string), 1 (type: int), val (type: string) - mode: final - outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: UDFToInteger(_col0) (type: int), _col1 (type: int), _col2 (type: string), UDFToInteger(_col3) (type: int) - outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - GlobalTableId: 1 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE -#### A masked pattern was here #### - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key1,key2,key3,cnt - columns.comments - columns.types 
int:int:string:int -#### A masked pattern was here #### - name default.outputtbl4 - numFiles 1 - numRows 6 - rawDataSize 48 - serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 54 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl4 - TotalFiles: 1 - GatherStats: true - MultiFileSpray: false - Path -> Alias: -#### A masked pattern was here #### - Path -> Partition: -#### A masked pattern was here #### - Partition - base file name: t2 - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - SORTBUCKETCOLSPREFIX TRUE - bucket_count 2 - bucket_field_name key - columns key,val - columns.comments - columns.types string:string -#### A masked pattern was here #### - name default.t2 - numFiles 1 - numRows 6 - rawDataSize 24 - serialization.ddl struct t2 { string key, string val} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 30 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - SORTBUCKETCOLSPREFIX TRUE - bucket_count 2 - bucket_field_name key - columns key,val - columns.comments - columns.types string:string -#### A masked pattern was here #### - name default.t2 - numFiles 1 - numRows 6 - rawDataSize 24 - serialization.ddl struct t2 { string key, string val} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 30 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.t2 - name: default.t2 - Truncated Path -> Alias: - /t2 [t2] - - Stage: Stage-7 - Conditional Operator - - Stage: Stage-4 - Move Operator - files: - hdfs directory: true -#### A masked pattern was here #### - - Stage: Stage-0 - Move Operator - tables: - replace: true -#### A masked pattern was here #### - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key1,key2,key3,cnt - columns.comments - columns.types int:int:string:int -#### A masked pattern was here #### - name default.outputtbl4 - numFiles 1 - numRows 6 - rawDataSize 48 - serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 54 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl4 - - Stage: Stage-2 - Stats-Aggr Operator -#### A masked pattern was here #### - - Stage: Stage-3 - Map Reduce - Map Operator Tree: - TableScan - GatherStats: false - File Output Operator - compressed: false - GlobalTableId: 0 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key1,key2,key3,cnt - 
columns.comments - columns.types int:int:string:int -#### A masked pattern was here #### - name default.outputtbl4 - numFiles 1 - numRows 6 - rawDataSize 48 - serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 54 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl4 - TotalFiles: 1 - GatherStats: false - MultiFileSpray: false - Path -> Alias: -#### A masked pattern was here #### - Path -> Partition: -#### A masked pattern was here #### - Partition - base file name: -ext-10001 - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key1,key2,key3,cnt - columns.comments - columns.types int:int:string:int -#### A masked pattern was here #### - name default.outputtbl4 - numFiles 1 - numRows 6 - rawDataSize 48 - serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 54 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key1,key2,key3,cnt - columns.comments - columns.types int:int:string:int -#### A masked pattern was here #### - name default.outputtbl4 - numFiles 1 - numRows 6 - rawDataSize 48 - serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 54 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl4 - name: default.outputtbl4 - Truncated Path -> Alias: -#### A masked pattern was here #### - - Stage: Stage-5 - Map Reduce - Map Operator Tree: - TableScan - GatherStats: false - File Output Operator - compressed: false - GlobalTableId: 0 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key1,key2,key3,cnt - columns.comments - columns.types int:int:string:int -#### A masked pattern was here #### - name default.outputtbl4 - numFiles 1 - numRows 6 - rawDataSize 48 - serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 54 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl4 - TotalFiles: 1 - GatherStats: false - MultiFileSpray: false - Path -> Alias: -#### A masked pattern was here #### - Path -> Partition: -#### A masked pattern was here #### - Partition - base file name: -ext-10001 - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key1,key2,key3,cnt - columns.comments - columns.types 
int:int:string:int -#### A masked pattern was here #### - name default.outputtbl4 - numFiles 1 - numRows 6 - rawDataSize 48 - serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 54 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key1,key2,key3,cnt - columns.comments - columns.types int:int:string:int -#### A masked pattern was here #### - name default.outputtbl4 - numFiles 1 - numRows 6 - rawDataSize 48 - serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 54 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl4 - name: default.outputtbl4 - Truncated Path -> Alias: -#### A masked pattern was here #### - - Stage: Stage-6 - Move Operator - files: - hdfs directory: true -#### A masked pattern was here #### - -PREHOOK: query: INSERT OVERWRITE TABLE outputTbl4 -SELECT key, 1, val, count(1) FROM T2 GROUP BY key, 1, val -PREHOOK: type: QUERY -PREHOOK: Input: default@t2 -PREHOOK: Output: default@outputtbl4 -POSTHOOK: query: INSERT OVERWRITE TABLE outputTbl4 -SELECT key, 1, val, count(1) FROM T2 GROUP BY key, 1, val -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t2 -POSTHOOK: Output: default@outputtbl4 -POSTHOOK: Lineage: outputtbl4.cnt EXPRESSION [(t2)t2.null, ] -POSTHOOK: Lineage: outputtbl4.key1 EXPRESSION [(t2)t2.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: outputtbl4.key2 SIMPLE [] -POSTHOOK: Lineage: outputtbl4.key3 SIMPLE [(t2)t2.FieldSchema(name:val, type:string, comment:null), ] -PREHOOK: query: SELECT * FROM outputTbl4 -PREHOOK: type: QUERY -PREHOOK: Input: default@outputtbl4 -#### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM outputTbl4 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@outputtbl4 -#### A masked pattern was here #### -1 1 11 1 -2 1 12 1 -3 1 13 1 -7 1 17 1 -8 1 18 1 -8 1 28 1 -PREHOOK: query: CREATE TABLE outputTbl5(key1 int, key2 int, key3 string, key4 int, cnt int) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -POSTHOOK: query: CREATE TABLE outputTbl5(key1 int, key2 int, key3 string, key4 int, cnt int) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@outputTbl5 -PREHOOK: query: -- The plan should be converted to a map-side group by if the group by key contains a constant in between the --- sorted keys followed by anything -EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl5 -SELECT key, 1, val, 2, count(1) FROM T2 GROUP BY key, 1, val, 2 -PREHOOK: type: QUERY -POSTHOOK: query: -- The plan should be converted to a map-side group by if the group by key contains a constant in between the --- sorted keys followed by anything -EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl5 -SELECT key, 1, val, 2, count(1) FROM T2 GROUP BY key, 1, val, 2 -POSTHOOK: type: QUERY -ABSTRACT SYNTAX TREE: - -TOK_QUERY - TOK_FROM - TOK_TABREF - TOK_TABNAME - T2 - TOK_INSERT - TOK_DESTINATION - TOK_TAB - TOK_TABNAME - outputTbl5 - TOK_SELECT - TOK_SELEXPR - TOK_TABLE_OR_COL - key - TOK_SELEXPR - 1 - 
TOK_SELEXPR - TOK_TABLE_OR_COL - val - TOK_SELEXPR - 2 - TOK_SELEXPR - TOK_FUNCTION - count - 1 - TOK_GROUPBY - TOK_TABLE_OR_COL - key - 1 - TOK_TABLE_OR_COL - val - 2 - - -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5 - Stage-4 - Stage-0 depends on stages: Stage-4, Stage-3, Stage-6 - Stage-2 depends on stages: Stage-0 - Stage-3 - Stage-5 - Stage-6 depends on stages: Stage-5 - -STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: t2 - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - GatherStats: false - Select Operator - expressions: key (type: string), val (type: string) - outputColumnNames: key, val - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - Group By Operator - aggregations: count(1) - keys: key (type: string), 1 (type: int), val (type: string), 2 (type: int) - mode: final - outputColumnNames: _col0, _col1, _col2, _col3, _col4 - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: UDFToInteger(_col0) (type: int), _col1 (type: int), _col2 (type: string), _col3 (type: int), UDFToInteger(_col4) (type: int) - outputColumnNames: _col0, _col1, _col2, _col3, _col4 - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - GlobalTableId: 1 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE -#### A masked pattern was here #### - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - bucket_count -1 - columns key1,key2,key3,key4,cnt - columns.comments - columns.types int:int:string:int:int -#### A masked pattern was here #### - name default.outputtbl5 - serialization.ddl struct outputtbl5 { i32 key1, i32 key2, string key3, i32 key4, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl5 - TotalFiles: 1 - GatherStats: true - MultiFileSpray: false - Path -> Alias: -#### A masked pattern was here #### - Path -> Partition: -#### A masked pattern was here #### - Partition - base file name: t2 - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - SORTBUCKETCOLSPREFIX TRUE - bucket_count 2 - bucket_field_name key - columns key,val - columns.comments - columns.types string:string -#### A masked pattern was here #### - name default.t2 - numFiles 1 - numRows 6 - rawDataSize 24 - serialization.ddl struct t2 { string key, string val} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 30 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - SORTBUCKETCOLSPREFIX TRUE - bucket_count 2 - bucket_field_name key - columns key,val - columns.comments - columns.types string:string -#### A masked pattern was here #### - name default.t2 - numFiles 1 - numRows 6 - 
rawDataSize 24 - serialization.ddl struct t2 { string key, string val} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 30 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.t2 - name: default.t2 - Truncated Path -> Alias: - /t2 [t2] - - Stage: Stage-7 - Conditional Operator - - Stage: Stage-4 - Move Operator - files: - hdfs directory: true -#### A masked pattern was here #### - - Stage: Stage-0 - Move Operator - tables: - replace: true -#### A masked pattern was here #### - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - bucket_count -1 - columns key1,key2,key3,key4,cnt - columns.comments - columns.types int:int:string:int:int -#### A masked pattern was here #### - name default.outputtbl5 - serialization.ddl struct outputtbl5 { i32 key1, i32 key2, string key3, i32 key4, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl5 - - Stage: Stage-2 - Stats-Aggr Operator -#### A masked pattern was here #### - - Stage: Stage-3 - Map Reduce - Map Operator Tree: - TableScan - GatherStats: false - File Output Operator - compressed: false - GlobalTableId: 0 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - bucket_count -1 - columns key1,key2,key3,key4,cnt - columns.comments - columns.types int:int:string:int:int -#### A masked pattern was here #### - name default.outputtbl5 - serialization.ddl struct outputtbl5 { i32 key1, i32 key2, string key3, i32 key4, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl5 - TotalFiles: 1 - GatherStats: false - MultiFileSpray: false - Path -> Alias: -#### A masked pattern was here #### - Path -> Partition: -#### A masked pattern was here #### - Partition - base file name: -ext-10001 - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - bucket_count -1 - columns key1,key2,key3,key4,cnt - columns.comments - columns.types int:int:string:int:int -#### A masked pattern was here #### - name default.outputtbl5 - serialization.ddl struct outputtbl5 { i32 key1, i32 key2, string key3, i32 key4, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - bucket_count -1 - columns key1,key2,key3,key4,cnt - columns.comments - columns.types int:int:string:int:int -#### A masked pattern was here #### - name default.outputtbl5 - serialization.ddl struct outputtbl5 { i32 key1, i32 key2, string key3, i32 key4, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -#### A masked pattern was here #### - serde: 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl5 - name: default.outputtbl5 - Truncated Path -> Alias: -#### A masked pattern was here #### - - Stage: Stage-5 - Map Reduce - Map Operator Tree: - TableScan - GatherStats: false - File Output Operator - compressed: false - GlobalTableId: 0 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - bucket_count -1 - columns key1,key2,key3,key4,cnt - columns.comments - columns.types int:int:string:int:int -#### A masked pattern was here #### - name default.outputtbl5 - serialization.ddl struct outputtbl5 { i32 key1, i32 key2, string key3, i32 key4, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl5 - TotalFiles: 1 - GatherStats: false - MultiFileSpray: false - Path -> Alias: -#### A masked pattern was here #### - Path -> Partition: -#### A masked pattern was here #### - Partition - base file name: -ext-10001 - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - bucket_count -1 - columns key1,key2,key3,key4,cnt - columns.comments - columns.types int:int:string:int:int -#### A masked pattern was here #### - name default.outputtbl5 - serialization.ddl struct outputtbl5 { i32 key1, i32 key2, string key3, i32 key4, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - bucket_count -1 - columns key1,key2,key3,key4,cnt - columns.comments - columns.types int:int:string:int:int -#### A masked pattern was here #### - name default.outputtbl5 - serialization.ddl struct outputtbl5 { i32 key1, i32 key2, string key3, i32 key4, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl5 - name: default.outputtbl5 - Truncated Path -> Alias: -#### A masked pattern was here #### - - Stage: Stage-6 - Move Operator - files: - hdfs directory: true -#### A masked pattern was here #### - -PREHOOK: query: INSERT OVERWRITE TABLE outputTbl5 -SELECT key, 1, val, 2, count(1) FROM T2 GROUP BY key, 1, val, 2 -PREHOOK: type: QUERY -PREHOOK: Input: default@t2 -PREHOOK: Output: default@outputtbl5 -POSTHOOK: query: INSERT OVERWRITE TABLE outputTbl5 -SELECT key, 1, val, 2, count(1) FROM T2 GROUP BY key, 1, val, 2 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t2 -POSTHOOK: Output: default@outputtbl5 -POSTHOOK: Lineage: outputtbl5.cnt EXPRESSION [(t2)t2.null, ] -POSTHOOK: Lineage: outputtbl5.key1 EXPRESSION [(t2)t2.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: outputtbl5.key2 SIMPLE [] -POSTHOOK: Lineage: outputtbl5.key3 SIMPLE [(t2)t2.FieldSchema(name:val, type:string, comment:null), ] -POSTHOOK: Lineage: outputtbl5.key4 SIMPLE [] -PREHOOK: query: SELECT * FROM outputTbl5 -PREHOOK: type: QUERY -PREHOOK: Input: default@outputtbl5 -#### A 
masked pattern was here #### -POSTHOOK: query: SELECT * FROM outputTbl5 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@outputtbl5 -#### A masked pattern was here #### -1 1 11 2 1 -2 1 12 2 1 -3 1 13 2 1 -7 1 17 2 1 -8 1 18 2 1 -8 1 28 2 1 -PREHOOK: query: -- contants from sub-queries should work fine -EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl4 -SELECT key, constant, val, count(1) from -(SELECT key, 1 as constant, val from T2)subq -group by key, constant, val -PREHOOK: type: QUERY -POSTHOOK: query: -- contants from sub-queries should work fine -EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl4 -SELECT key, constant, val, count(1) from -(SELECT key, 1 as constant, val from T2)subq -group by key, constant, val -POSTHOOK: type: QUERY -ABSTRACT SYNTAX TREE: - -TOK_QUERY - TOK_FROM - TOK_SUBQUERY - TOK_QUERY - TOK_FROM - TOK_TABREF - TOK_TABNAME - T2 - TOK_INSERT - TOK_DESTINATION - TOK_DIR - TOK_TMP_FILE - TOK_SELECT - TOK_SELEXPR - TOK_TABLE_OR_COL - key - TOK_SELEXPR - 1 - constant - TOK_SELEXPR - TOK_TABLE_OR_COL - val - subq - TOK_INSERT - TOK_DESTINATION - TOK_TAB - TOK_TABNAME - outputTbl4 - TOK_SELECT - TOK_SELEXPR - TOK_TABLE_OR_COL - key - TOK_SELEXPR - TOK_TABLE_OR_COL - constant - TOK_SELEXPR - TOK_TABLE_OR_COL - val - TOK_SELEXPR - TOK_FUNCTION - count - 1 - TOK_GROUPBY - TOK_TABLE_OR_COL - key - TOK_TABLE_OR_COL - constant - TOK_TABLE_OR_COL - val - - -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5 - Stage-4 - Stage-0 depends on stages: Stage-4, Stage-3, Stage-6 - Stage-2 depends on stages: Stage-0 - Stage-3 - Stage-5 - Stage-6 depends on stages: Stage-5 - -STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: t2 - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - GatherStats: false - Select Operator - expressions: key (type: string), 1 (type: int), val (type: string) - outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - Group By Operator - aggregations: count(1) - keys: _col0 (type: string), _col1 (type: int), _col2 (type: string) - mode: final - outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: UDFToInteger(_col0) (type: int), _col1 (type: int), _col2 (type: string), UDFToInteger(_col3) (type: int) - outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - GlobalTableId: 1 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE -#### A masked pattern was here #### - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key1,key2,key3,cnt - columns.comments - columns.types int:int:string:int -#### A masked pattern was here #### - name default.outputtbl4 - numFiles 1 - numRows 6 - rawDataSize 48 - serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 54 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: 
default.outputtbl4 - TotalFiles: 1 - GatherStats: true - MultiFileSpray: false - Path -> Alias: -#### A masked pattern was here #### - Path -> Partition: -#### A masked pattern was here #### - Partition - base file name: t2 - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - SORTBUCKETCOLSPREFIX TRUE - bucket_count 2 - bucket_field_name key - columns key,val - columns.comments - columns.types string:string -#### A masked pattern was here #### - name default.t2 - numFiles 1 - numRows 6 - rawDataSize 24 - serialization.ddl struct t2 { string key, string val} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 30 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - SORTBUCKETCOLSPREFIX TRUE - bucket_count 2 - bucket_field_name key - columns key,val - columns.comments - columns.types string:string -#### A masked pattern was here #### - name default.t2 - numFiles 1 - numRows 6 - rawDataSize 24 - serialization.ddl struct t2 { string key, string val} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 30 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.t2 - name: default.t2 - Truncated Path -> Alias: - /t2 [subq:t2] - - Stage: Stage-7 - Conditional Operator - - Stage: Stage-4 - Move Operator - files: - hdfs directory: true -#### A masked pattern was here #### - - Stage: Stage-0 - Move Operator - tables: - replace: true -#### A masked pattern was here #### - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key1,key2,key3,cnt - columns.comments - columns.types int:int:string:int -#### A masked pattern was here #### - name default.outputtbl4 - numFiles 1 - numRows 6 - rawDataSize 48 - serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 54 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl4 - - Stage: Stage-2 - Stats-Aggr Operator -#### A masked pattern was here #### - - Stage: Stage-3 - Map Reduce - Map Operator Tree: - TableScan - GatherStats: false - File Output Operator - compressed: false - GlobalTableId: 0 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key1,key2,key3,cnt - columns.comments - columns.types int:int:string:int -#### A masked pattern was here #### - name default.outputtbl4 - numFiles 1 - numRows 6 - rawDataSize 48 - serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 54 -#### A masked pattern was here #### - serde: 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl4 - TotalFiles: 1 - GatherStats: false - MultiFileSpray: false - Path -> Alias: -#### A masked pattern was here #### - Path -> Partition: -#### A masked pattern was here #### - Partition - base file name: -ext-10001 - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key1,key2,key3,cnt - columns.comments - columns.types int:int:string:int -#### A masked pattern was here #### - name default.outputtbl4 - numFiles 1 - numRows 6 - rawDataSize 48 - serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 54 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key1,key2,key3,cnt - columns.comments - columns.types int:int:string:int -#### A masked pattern was here #### - name default.outputtbl4 - numFiles 1 - numRows 6 - rawDataSize 48 - serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 54 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl4 - name: default.outputtbl4 - Truncated Path -> Alias: -#### A masked pattern was here #### - - Stage: Stage-5 - Map Reduce - Map Operator Tree: - TableScan - GatherStats: false - File Output Operator - compressed: false - GlobalTableId: 0 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key1,key2,key3,cnt - columns.comments - columns.types int:int:string:int -#### A masked pattern was here #### - name default.outputtbl4 - numFiles 1 - numRows 6 - rawDataSize 48 - serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 54 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl4 - TotalFiles: 1 - GatherStats: false - MultiFileSpray: false - Path -> Alias: -#### A masked pattern was here #### - Path -> Partition: -#### A masked pattern was here #### - Partition - base file name: -ext-10001 - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key1,key2,key3,cnt - columns.comments - columns.types int:int:string:int -#### A masked pattern was here #### - name default.outputtbl4 - numFiles 1 - numRows 6 - rawDataSize 48 - serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 54 -#### A masked pattern was here #### - serde: 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key1,key2,key3,cnt - columns.comments - columns.types int:int:string:int -#### A masked pattern was here #### - name default.outputtbl4 - numFiles 1 - numRows 6 - rawDataSize 48 - serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 54 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl4 - name: default.outputtbl4 - Truncated Path -> Alias: -#### A masked pattern was here #### - - Stage: Stage-6 - Move Operator - files: - hdfs directory: true -#### A masked pattern was here #### - -PREHOOK: query: INSERT OVERWRITE TABLE outputTbl4 -SELECT key, constant, val, count(1) from -(SELECT key, 1 as constant, val from T2)subq -group by key, constant, val -PREHOOK: type: QUERY -PREHOOK: Input: default@t2 -PREHOOK: Output: default@outputtbl4 -POSTHOOK: query: INSERT OVERWRITE TABLE outputTbl4 -SELECT key, constant, val, count(1) from -(SELECT key, 1 as constant, val from T2)subq -group by key, constant, val -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t2 -POSTHOOK: Output: default@outputtbl4 -POSTHOOK: Lineage: outputtbl4.cnt EXPRESSION [(t2)t2.null, ] -POSTHOOK: Lineage: outputtbl4.key1 EXPRESSION [(t2)t2.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: outputtbl4.key2 SIMPLE [] -POSTHOOK: Lineage: outputtbl4.key3 SIMPLE [(t2)t2.FieldSchema(name:val, type:string, comment:null), ] -PREHOOK: query: SELECT * FROM outputTbl4 -PREHOOK: type: QUERY -PREHOOK: Input: default@outputtbl4 -#### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM outputTbl4 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@outputtbl4 -#### A masked pattern was here #### -1 1 11 1 -2 1 12 1 -3 1 13 1 -7 1 17 1 -8 1 18 1 -8 1 28 1 -PREHOOK: query: -- multiple levels of contants from sub-queries should work fine -EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl4 -select key, constant3, val, count(1) from -( -SELECT key, constant as constant2, val, 2 as constant3 from -(SELECT key, 1 as constant, val from T2)subq -)subq2 -group by key, constant3, val -PREHOOK: type: QUERY -POSTHOOK: query: -- multiple levels of contants from sub-queries should work fine -EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl4 -select key, constant3, val, count(1) from -( -SELECT key, constant as constant2, val, 2 as constant3 from -(SELECT key, 1 as constant, val from T2)subq -)subq2 -group by key, constant3, val -POSTHOOK: type: QUERY -ABSTRACT SYNTAX TREE: - -TOK_QUERY - TOK_FROM - TOK_SUBQUERY - TOK_QUERY - TOK_FROM - TOK_SUBQUERY - TOK_QUERY - TOK_FROM - TOK_TABREF - TOK_TABNAME - T2 - TOK_INSERT - TOK_DESTINATION - TOK_DIR - TOK_TMP_FILE - TOK_SELECT - TOK_SELEXPR - TOK_TABLE_OR_COL - key - TOK_SELEXPR - 1 - constant - TOK_SELEXPR - TOK_TABLE_OR_COL - val - subq - TOK_INSERT - TOK_DESTINATION - TOK_DIR - TOK_TMP_FILE - TOK_SELECT - TOK_SELEXPR - TOK_TABLE_OR_COL - key - TOK_SELEXPR - TOK_TABLE_OR_COL - constant - constant2 - TOK_SELEXPR - TOK_TABLE_OR_COL - val - TOK_SELEXPR - 2 - constant3 - subq2 - TOK_INSERT - TOK_DESTINATION - TOK_TAB - TOK_TABNAME - outputTbl4 - TOK_SELECT - TOK_SELEXPR - TOK_TABLE_OR_COL - key - TOK_SELEXPR - TOK_TABLE_OR_COL - 
constant3 - TOK_SELEXPR - TOK_TABLE_OR_COL - val - TOK_SELEXPR - TOK_FUNCTION - count - 1 - TOK_GROUPBY - TOK_TABLE_OR_COL - key - TOK_TABLE_OR_COL - constant3 - TOK_TABLE_OR_COL - val - - -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5 - Stage-4 - Stage-0 depends on stages: Stage-4, Stage-3, Stage-6 - Stage-2 depends on stages: Stage-0 - Stage-3 - Stage-5 - Stage-6 depends on stages: Stage-5 - -STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: t2 - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - GatherStats: false - Select Operator - expressions: key (type: string), 2 (type: int), val (type: string) - outputColumnNames: _col0, _col3, _col2 - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - Group By Operator - aggregations: count(1) - keys: _col0 (type: string), _col3 (type: int), _col2 (type: string) - mode: final - outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: UDFToInteger(_col0) (type: int), _col1 (type: int), _col2 (type: string), UDFToInteger(_col3) (type: int) - outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - GlobalTableId: 1 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE -#### A masked pattern was here #### - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key1,key2,key3,cnt - columns.comments - columns.types int:int:string:int -#### A masked pattern was here #### - name default.outputtbl4 - numFiles 1 - numRows 6 - rawDataSize 48 - serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 54 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl4 - TotalFiles: 1 - GatherStats: true - MultiFileSpray: false - Path -> Alias: -#### A masked pattern was here #### - Path -> Partition: -#### A masked pattern was here #### - Partition - base file name: t2 - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - SORTBUCKETCOLSPREFIX TRUE - bucket_count 2 - bucket_field_name key - columns key,val - columns.comments - columns.types string:string -#### A masked pattern was here #### - name default.t2 - numFiles 1 - numRows 6 - rawDataSize 24 - serialization.ddl struct t2 { string key, string val} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 30 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - SORTBUCKETCOLSPREFIX TRUE - bucket_count 2 - bucket_field_name key - columns key,val - columns.comments - columns.types string:string -#### A 
masked pattern was here #### - name default.t2 - numFiles 1 - numRows 6 - rawDataSize 24 - serialization.ddl struct t2 { string key, string val} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 30 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.t2 - name: default.t2 - Truncated Path -> Alias: - /t2 [subq2:subq:t2] - - Stage: Stage-7 - Conditional Operator - - Stage: Stage-4 - Move Operator - files: - hdfs directory: true -#### A masked pattern was here #### - - Stage: Stage-0 - Move Operator - tables: - replace: true -#### A masked pattern was here #### - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key1,key2,key3,cnt - columns.comments - columns.types int:int:string:int -#### A masked pattern was here #### - name default.outputtbl4 - numFiles 1 - numRows 6 - rawDataSize 48 - serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 54 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl4 - - Stage: Stage-2 - Stats-Aggr Operator -#### A masked pattern was here #### - - Stage: Stage-3 - Map Reduce - Map Operator Tree: - TableScan - GatherStats: false - File Output Operator - compressed: false - GlobalTableId: 0 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key1,key2,key3,cnt - columns.comments - columns.types int:int:string:int -#### A masked pattern was here #### - name default.outputtbl4 - numFiles 1 - numRows 6 - rawDataSize 48 - serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 54 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl4 - TotalFiles: 1 - GatherStats: false - MultiFileSpray: false - Path -> Alias: -#### A masked pattern was here #### - Path -> Partition: -#### A masked pattern was here #### - Partition - base file name: -ext-10001 - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key1,key2,key3,cnt - columns.comments - columns.types int:int:string:int -#### A masked pattern was here #### - name default.outputtbl4 - numFiles 1 - numRows 6 - rawDataSize 48 - serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 54 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key1,key2,key3,cnt - columns.comments - columns.types int:int:string:int 
-#### A masked pattern was here #### - name default.outputtbl4 - numFiles 1 - numRows 6 - rawDataSize 48 - serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 54 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl4 - name: default.outputtbl4 - Truncated Path -> Alias: -#### A masked pattern was here #### - - Stage: Stage-5 - Map Reduce - Map Operator Tree: - TableScan - GatherStats: false - File Output Operator - compressed: false - GlobalTableId: 0 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key1,key2,key3,cnt - columns.comments - columns.types int:int:string:int -#### A masked pattern was here #### - name default.outputtbl4 - numFiles 1 - numRows 6 - rawDataSize 48 - serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 54 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl4 - TotalFiles: 1 - GatherStats: false - MultiFileSpray: false - Path -> Alias: -#### A masked pattern was here #### - Path -> Partition: -#### A masked pattern was here #### - Partition - base file name: -ext-10001 - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key1,key2,key3,cnt - columns.comments - columns.types int:int:string:int -#### A masked pattern was here #### - name default.outputtbl4 - numFiles 1 - numRows 6 - rawDataSize 48 - serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 54 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key1,key2,key3,cnt - columns.comments - columns.types int:int:string:int -#### A masked pattern was here #### - name default.outputtbl4 - numFiles 1 - numRows 6 - rawDataSize 48 - serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 54 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl4 - name: default.outputtbl4 - Truncated Path -> Alias: -#### A masked pattern was here #### - - Stage: Stage-6 - Move Operator - files: - hdfs directory: true -#### A masked pattern was here #### - -PREHOOK: query: INSERT OVERWRITE TABLE outputTbl4 -select key, constant3, val, count(1) from -( -SELECT key, constant as constant2, val, 2 as constant3 from -(SELECT key, 1 as constant, val from T2)subq -)subq2 -group by key, constant3, val -PREHOOK: type: QUERY -PREHOOK: Input: default@t2 -PREHOOK: Output: default@outputtbl4 -POSTHOOK: 
query: INSERT OVERWRITE TABLE outputTbl4 -select key, constant3, val, count(1) from -( -SELECT key, constant as constant2, val, 2 as constant3 from -(SELECT key, 1 as constant, val from T2)subq -)subq2 -group by key, constant3, val -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t2 -POSTHOOK: Output: default@outputtbl4 -POSTHOOK: Lineage: outputtbl4.cnt EXPRESSION [(t2)t2.null, ] -POSTHOOK: Lineage: outputtbl4.key1 EXPRESSION [(t2)t2.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: outputtbl4.key2 SIMPLE [] -POSTHOOK: Lineage: outputtbl4.key3 SIMPLE [(t2)t2.FieldSchema(name:val, type:string, comment:null), ] -PREHOOK: query: SELECT * FROM outputTbl4 -PREHOOK: type: QUERY -PREHOOK: Input: default@outputtbl4 -#### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM outputTbl4 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@outputtbl4 -#### A masked pattern was here #### -1 2 11 1 -2 2 12 1 -3 2 13 1 -7 2 17 1 -8 2 18 1 -8 2 28 1 -PREHOOK: query: CREATE TABLE DEST1(key INT, cnt INT) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -POSTHOOK: query: CREATE TABLE DEST1(key INT, cnt INT) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@DEST1 -PREHOOK: query: CREATE TABLE DEST2(key INT, val STRING, cnt INT) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -POSTHOOK: query: CREATE TABLE DEST2(key INT, val STRING, cnt INT) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@DEST2 -PREHOOK: query: EXPLAIN -FROM T2 -INSERT OVERWRITE TABLE DEST1 SELECT key, count(1) GROUP BY key -INSERT OVERWRITE TABLE DEST2 SELECT key, val, count(1) GROUP BY key, val -PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN -FROM T2 -INSERT OVERWRITE TABLE DEST1 SELECT key, count(1) GROUP BY key -INSERT OVERWRITE TABLE DEST2 SELECT key, val, count(1) GROUP BY key, val -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-2 is a root stage - Stage-0 depends on stages: Stage-2 - Stage-3 depends on stages: Stage-0 - Stage-1 depends on stages: Stage-2 - Stage-4 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-2 - Map Reduce - Map Operator Tree: - TableScan - alias: t2 - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: key (type: string) - outputColumnNames: key - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - Group By Operator - aggregations: count(1) - bucketGroup: true - keys: key (type: string) - mode: hash - outputColumnNames: _col0, _col1 - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: _col0 (type: string) - sort order: + - Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - value expressions: _col1 (type: bigint) - Select Operator - expressions: key (type: string), val (type: string) - outputColumnNames: key, val - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - Group By Operator - aggregations: count(1) - keys: key (type: string), val (type: string) - mode: final - outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: UDFToInteger(_col0) (type: int), _col1 (type: string), UDFToInteger(_col2) (type: int) - outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 3 Data size: 12 Basic 
stats: COMPLETE Column stats: NONE - File Output Operator - compressed: true - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest2 - Reduce Operator Tree: - Group By Operator - aggregations: count(VALUE._col0) - keys: KEY._col0 (type: string) - mode: mergepartial - outputColumnNames: _col0, _col1 - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: UDFToInteger(_col0) (type: int), UDFToInteger(_col1) (type: int) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: true - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 - - Stage: Stage-0 - Move Operator - tables: - replace: true - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 - - Stage: Stage-3 - Stats-Aggr Operator - - Stage: Stage-1 - Move Operator - tables: - replace: true - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest2 - - Stage: Stage-4 - Stats-Aggr Operator - -PREHOOK: query: FROM T2 -INSERT OVERWRITE TABLE DEST1 SELECT key, count(1) GROUP BY key -INSERT OVERWRITE TABLE DEST2 SELECT key, val, count(1) GROUP BY key, val -PREHOOK: type: QUERY -PREHOOK: Input: default@t2 -PREHOOK: Output: default@dest1 -PREHOOK: Output: default@dest2 -POSTHOOK: query: FROM T2 -INSERT OVERWRITE TABLE DEST1 SELECT key, count(1) GROUP BY key -INSERT OVERWRITE TABLE DEST2 SELECT key, val, count(1) GROUP BY key, val -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t2 -POSTHOOK: Output: default@dest1 -POSTHOOK: Output: default@dest2 -POSTHOOK: Lineage: dest1.cnt EXPRESSION [(t2)t2.null, ] -POSTHOOK: Lineage: dest1.key EXPRESSION [(t2)t2.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: dest2.cnt EXPRESSION [(t2)t2.null, ] -POSTHOOK: Lineage: dest2.key EXPRESSION [(t2)t2.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: dest2.val SIMPLE [(t2)t2.FieldSchema(name:val, type:string, comment:null), ] -PREHOOK: query: select * from DEST1 -PREHOOK: type: QUERY -PREHOOK: Input: default@dest1 -#### A masked pattern was here #### -POSTHOOK: query: select * from DEST1 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest1 -#### A masked pattern was here #### -1 1 -2 1 -3 1 -7 1 -8 2 -PREHOOK: query: select * from DEST2 -PREHOOK: type: QUERY -PREHOOK: Input: default@dest2 -#### A masked pattern was here #### -POSTHOOK: query: select * from DEST2 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest2 -#### A masked pattern was here #### -1 11 1 -2 12 1 -3 13 1 -7 17 1 -8 18 1 -8 28 1 -PREHOOK: query: -- multi-table insert with a sub-query -EXPLAIN -FROM (select key, val from T2 where key = 8) x -INSERT OVERWRITE TABLE DEST1 SELECT 
key, count(1) GROUP BY key -INSERT OVERWRITE TABLE DEST2 SELECT key, val, count(1) GROUP BY key, val -PREHOOK: type: QUERY -POSTHOOK: query: -- multi-table insert with a sub-query -EXPLAIN -FROM (select key, val from T2 where key = 8) x -INSERT OVERWRITE TABLE DEST1 SELECT key, count(1) GROUP BY key -INSERT OVERWRITE TABLE DEST2 SELECT key, val, count(1) GROUP BY key, val -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-2 is a root stage - Stage-0 depends on stages: Stage-2 - Stage-3 depends on stages: Stage-0 - Stage-1 depends on stages: Stage-2 - Stage-4 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-2 - Map Reduce - Map Operator Tree: - TableScan - alias: t2 - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - Filter Operator - predicate: (key = 8) (type: boolean) - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: val (type: string) - outputColumnNames: _col1 - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: '8' (type: string) - outputColumnNames: _col0 - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE - Group By Operator - aggregations: count(1) - bucketGroup: true - keys: _col0 (type: string) - mode: hash - outputColumnNames: _col0, _col1 - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: _col0 (type: string) - sort order: + - Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE - value expressions: _col1 (type: bigint) - Select Operator - expressions: '8' (type: string), _col1 (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE - Group By Operator - aggregations: count(1) - keys: _col0 (type: string), _col1 (type: string) - mode: final - outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: UDFToInteger(_col0) (type: int), _col1 (type: string), UDFToInteger(_col2) (type: int) - outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: true - Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest2 - Reduce Operator Tree: - Group By Operator - aggregations: count(VALUE._col0) - keys: KEY._col0 (type: string) - mode: mergepartial - outputColumnNames: _col0, _col1 - Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: UDFToInteger(_col0) (type: int), UDFToInteger(_col1) (type: int) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: true - Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 - - Stage: Stage-0 - Move Operator - 
tables: - replace: true - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 - - Stage: Stage-3 - Stats-Aggr Operator - - Stage: Stage-1 - Move Operator - tables: - replace: true - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest2 - - Stage: Stage-4 - Stats-Aggr Operator - -PREHOOK: query: FROM (select key, val from T2 where key = 8) x -INSERT OVERWRITE TABLE DEST1 SELECT key, count(1) GROUP BY key -INSERT OVERWRITE TABLE DEST2 SELECT key, val, count(1) GROUP BY key, val -PREHOOK: type: QUERY -PREHOOK: Input: default@t2 -PREHOOK: Output: default@dest1 -PREHOOK: Output: default@dest2 -POSTHOOK: query: FROM (select key, val from T2 where key = 8) x -INSERT OVERWRITE TABLE DEST1 SELECT key, count(1) GROUP BY key -INSERT OVERWRITE TABLE DEST2 SELECT key, val, count(1) GROUP BY key, val -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t2 -POSTHOOK: Output: default@dest1 -POSTHOOK: Output: default@dest2 -POSTHOOK: Lineage: dest1.cnt EXPRESSION [(t2)t2.null, ] -POSTHOOK: Lineage: dest1.key EXPRESSION [(t2)t2.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: dest2.cnt EXPRESSION [(t2)t2.null, ] -POSTHOOK: Lineage: dest2.key EXPRESSION [(t2)t2.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: dest2.val SIMPLE [(t2)t2.FieldSchema(name:val, type:string, comment:null), ] -PREHOOK: query: select * from DEST1 -PREHOOK: type: QUERY -PREHOOK: Input: default@dest1 -#### A masked pattern was here #### -POSTHOOK: query: select * from DEST1 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest1 -#### A masked pattern was here #### -8 2 -PREHOOK: query: select * from DEST2 -PREHOOK: type: QUERY -PREHOOK: Input: default@dest2 -#### A masked pattern was here #### -POSTHOOK: query: select * from DEST2 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest2 -#### A masked pattern was here #### -8 18 1 -8 28 1 diff --git ql/src/test/results/clientpositive/groupby_sort_skew_1.q.out ql/src/test/results/clientpositive/groupby_sort_skew_1.q.out deleted file mode 100644 index e912a29716..0000000000 --- ql/src/test/results/clientpositive/groupby_sort_skew_1.q.out +++ /dev/null @@ -1,7097 +0,0 @@ -PREHOOK: query: -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.20,0.20S) --- SORT_QUERY_RESULTS - -CREATE TABLE T1(key STRING, val STRING) -CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -POSTHOOK: query: -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.20,0.20S) --- SORT_QUERY_RESULTS - -CREATE TABLE T1(key STRING, val STRING) -CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@T1 -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1 -PREHOOK: type: LOAD -#### A masked pattern was here #### -PREHOOK: Output: default@t1 -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1 -POSTHOOK: type: LOAD -#### A masked pattern was here #### -POSTHOOK: Output: default@t1 -PREHOOK: query: -- perform an insert to make sure there are 2 files -INSERT OVERWRITE TABLE T1 select key, val from T1 -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 
-PREHOOK: Output: default@t1 -POSTHOOK: query: -- perform an insert to make sure there are 2 files -INSERT OVERWRITE TABLE T1 select key, val from T1 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Output: default@t1 -POSTHOOK: Lineage: t1.key SIMPLE [(t1)t1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: t1.val SIMPLE [(t1)t1.FieldSchema(name:val, type:string, comment:null), ] -PREHOOK: query: CREATE TABLE outputTbl1(key int, cnt int) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -POSTHOOK: query: CREATE TABLE outputTbl1(key int, cnt int) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@outputTbl1 -PREHOOK: query: -- The plan should be converted to a map-side group by if the group by key --- matches the sorted key -EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl1 -SELECT key, count(1) FROM T1 GROUP BY key -PREHOOK: type: QUERY -POSTHOOK: query: -- The plan should be converted to a map-side group by if the group by key --- matches the sorted key -EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl1 -SELECT key, count(1) FROM T1 GROUP BY key -POSTHOOK: type: QUERY -ABSTRACT SYNTAX TREE: - -TOK_QUERY - TOK_FROM - TOK_TABREF - TOK_TABNAME - T1 - TOK_INSERT - TOK_DESTINATION - TOK_TAB - TOK_TABNAME - outputTbl1 - TOK_SELECT - TOK_SELEXPR - TOK_TABLE_OR_COL - key - TOK_SELEXPR - TOK_FUNCTION - count - 1 - TOK_GROUPBY - TOK_TABLE_OR_COL - key - - -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5 - Stage-4 - Stage-0 depends on stages: Stage-4, Stage-3, Stage-6 - Stage-2 depends on stages: Stage-0 - Stage-3 - Stage-5 - Stage-6 depends on stages: Stage-5 - -STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: t1 - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - GatherStats: false - Select Operator - expressions: key (type: string) - outputColumnNames: key - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - Group By Operator - aggregations: count(1) - keys: key (type: string) - mode: final - outputColumnNames: _col0, _col1 - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: UDFToInteger(_col0) (type: int), UDFToInteger(_col1) (type: int) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - GlobalTableId: 1 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE -#### A masked pattern was here #### - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - bucket_count -1 - columns key,cnt - columns.comments - columns.types int:int -#### A masked pattern was here #### - name default.outputtbl1 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 - TotalFiles: 1 - GatherStats: true - MultiFileSpray: false - Path -> Alias: -#### A masked pattern was here #### - Path -> Partition: -#### A masked pattern was here #### - Partition - base file name: t1 - input format: 
org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - SORTBUCKETCOLSPREFIX TRUE - bucket_count 2 - bucket_field_name key - columns key,val - columns.comments - columns.types string:string -#### A masked pattern was here #### - name default.t1 - numFiles 1 - numRows 6 - rawDataSize 24 - serialization.ddl struct t1 { string key, string val} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 30 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - SORTBUCKETCOLSPREFIX TRUE - bucket_count 2 - bucket_field_name key - columns key,val - columns.comments - columns.types string:string -#### A masked pattern was here #### - name default.t1 - numFiles 1 - numRows 6 - rawDataSize 24 - serialization.ddl struct t1 { string key, string val} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 30 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.t1 - name: default.t1 - Truncated Path -> Alias: - /t1 [t1] - - Stage: Stage-7 - Conditional Operator - - Stage: Stage-4 - Move Operator - files: - hdfs directory: true -#### A masked pattern was here #### - - Stage: Stage-0 - Move Operator - tables: - replace: true -#### A masked pattern was here #### - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - bucket_count -1 - columns key,cnt - columns.comments - columns.types int:int -#### A masked pattern was here #### - name default.outputtbl1 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 - - Stage: Stage-2 - Stats-Aggr Operator -#### A masked pattern was here #### - - Stage: Stage-3 - Map Reduce - Map Operator Tree: - TableScan - GatherStats: false - File Output Operator - compressed: false - GlobalTableId: 0 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - bucket_count -1 - columns key,cnt - columns.comments - columns.types int:int -#### A masked pattern was here #### - name default.outputtbl1 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 - TotalFiles: 1 - GatherStats: false - MultiFileSpray: false - Path -> Alias: -#### A masked pattern was here #### - Path -> Partition: -#### A masked pattern was here #### - Partition - base file name: -ext-10001 - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - bucket_count -1 - columns key,cnt - columns.comments - columns.types int:int -#### A masked pattern 
was here #### - name default.outputtbl1 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - bucket_count -1 - columns key,cnt - columns.comments - columns.types int:int -#### A masked pattern was here #### - name default.outputtbl1 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 - name: default.outputtbl1 - Truncated Path -> Alias: -#### A masked pattern was here #### - - Stage: Stage-5 - Map Reduce - Map Operator Tree: - TableScan - GatherStats: false - File Output Operator - compressed: false - GlobalTableId: 0 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - bucket_count -1 - columns key,cnt - columns.comments - columns.types int:int -#### A masked pattern was here #### - name default.outputtbl1 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 - TotalFiles: 1 - GatherStats: false - MultiFileSpray: false - Path -> Alias: -#### A masked pattern was here #### - Path -> Partition: -#### A masked pattern was here #### - Partition - base file name: -ext-10001 - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - bucket_count -1 - columns key,cnt - columns.comments - columns.types int:int -#### A masked pattern was here #### - name default.outputtbl1 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - bucket_count -1 - columns key,cnt - columns.comments - columns.types int:int -#### A masked pattern was here #### - name default.outputtbl1 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 - name: default.outputtbl1 - Truncated Path -> Alias: -#### A masked pattern was here #### - - Stage: Stage-6 - Move Operator - files: - hdfs directory: true -#### A masked pattern was here #### - -PREHOOK: query: INSERT OVERWRITE TABLE outputTbl1 -SELECT key, count(1) FROM T1 GROUP BY key -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Output: default@outputtbl1 -POSTHOOK: query: INSERT OVERWRITE TABLE outputTbl1 -SELECT key, count(1) FROM T1 GROUP BY key -POSTHOOK: type: 
QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Output: default@outputtbl1 -POSTHOOK: Lineage: outputtbl1.cnt EXPRESSION [(t1)t1.null, ] -POSTHOOK: Lineage: outputtbl1.key EXPRESSION [(t1)t1.FieldSchema(name:key, type:string, comment:null), ] -PREHOOK: query: SELECT * FROM outputTbl1 -PREHOOK: type: QUERY -PREHOOK: Input: default@outputtbl1 -#### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM outputTbl1 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@outputtbl1 -#### A masked pattern was here #### -1 1 -2 1 -3 1 -7 1 -8 2 -PREHOOK: query: CREATE TABLE outputTbl2(key1 int, key2 string, cnt int) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -POSTHOOK: query: CREATE TABLE outputTbl2(key1 int, key2 string, cnt int) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@outputTbl2 -PREHOOK: query: -- no map-side group by even if the group by key is a superset of sorted key -EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl2 -SELECT key, val, count(1) FROM T1 GROUP BY key, val -PREHOOK: type: QUERY -POSTHOOK: query: -- no map-side group by even if the group by key is a superset of sorted key -EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl2 -SELECT key, val, count(1) FROM T1 GROUP BY key, val -POSTHOOK: type: QUERY -ABSTRACT SYNTAX TREE: - -TOK_QUERY - TOK_FROM - TOK_TABREF - TOK_TABNAME - T1 - TOK_INSERT - TOK_DESTINATION - TOK_TAB - TOK_TABNAME - outputTbl2 - TOK_SELECT - TOK_SELEXPR - TOK_TABLE_OR_COL - key - TOK_SELEXPR - TOK_TABLE_OR_COL - val - TOK_SELEXPR - TOK_FUNCTION - count - 1 - TOK_GROUPBY - TOK_TABLE_OR_COL - key - TOK_TABLE_OR_COL - val - - -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-2 depends on stages: Stage-1 - Stage-0 depends on stages: Stage-2 - Stage-3 depends on stages: Stage-0 - -STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: t1 - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - GatherStats: false - Select Operator - expressions: key (type: string), val (type: string) - outputColumnNames: key, val - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - Group By Operator - aggregations: count(1) - keys: key (type: string), val (type: string) - mode: hash - outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: _col0 (type: string), _col1 (type: string) - sort order: ++ - Map-reduce partition columns: rand() (type: double) - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - tag: -1 - value expressions: _col2 (type: bigint) - auto parallelism: false - Path -> Alias: -#### A masked pattern was here #### - Path -> Partition: -#### A masked pattern was here #### - Partition - base file name: t1 - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - SORTBUCKETCOLSPREFIX TRUE - bucket_count 2 - bucket_field_name key - columns key,val - columns.comments - columns.types string:string -#### A masked pattern was here #### - name default.t1 - numFiles 1 - numRows 6 - rawDataSize 24 - serialization.ddl struct t1 { string key, string val} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 30 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - input 
format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - SORTBUCKETCOLSPREFIX TRUE - bucket_count 2 - bucket_field_name key - columns key,val - columns.comments - columns.types string:string -#### A masked pattern was here #### - name default.t1 - numFiles 1 - numRows 6 - rawDataSize 24 - serialization.ddl struct t1 { string key, string val} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 30 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.t1 - name: default.t1 - Truncated Path -> Alias: - /t1 [t1] - Needs Tagging: false - Reduce Operator Tree: - Group By Operator - aggregations: count(VALUE._col0) - keys: KEY._col0 (type: string), KEY._col1 (type: string) - mode: partials - outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - GlobalTableId: 0 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - properties: - columns _col0,_col1,_col2 - columns.types string,string,bigint - escape.delim \ - serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - TotalFiles: 1 - GatherStats: false - MultiFileSpray: false - - Stage: Stage-2 - Map Reduce - Map Operator Tree: - TableScan - GatherStats: false - Reduce Output Operator - key expressions: _col0 (type: string), _col1 (type: string) - sort order: ++ - Map-reduce partition columns: _col0 (type: string), _col1 (type: string) - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - tag: -1 - value expressions: _col2 (type: bigint) - auto parallelism: false - Path -> Alias: -#### A masked pattern was here #### - Path -> Partition: -#### A masked pattern was here #### - Partition - base file name: -mr-10001 - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - properties: - columns _col0,_col1,_col2 - columns.types string,string,bigint - escape.delim \ - serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - properties: - columns _col0,_col1,_col2 - columns.types string,string,bigint - escape.delim \ - serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - Truncated Path -> Alias: -#### A masked pattern was here #### - Needs Tagging: false - Reduce Operator Tree: - Group By Operator - aggregations: count(VALUE._col0) - keys: KEY._col0 (type: string), KEY._col1 (type: string) - mode: final - outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: UDFToInteger(_col0) (type: int), _col1 (type: string), UDFToInteger(_col2) (type: int) - outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE - 
File Output Operator - compressed: false - GlobalTableId: 1 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE -#### A masked pattern was here #### - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - bucket_count -1 - columns key1,key2,cnt - columns.comments - columns.types int:string:int -#### A masked pattern was here #### - name default.outputtbl2 - serialization.ddl struct outputtbl2 { i32 key1, string key2, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl2 - TotalFiles: 1 - GatherStats: true - MultiFileSpray: false - - Stage: Stage-0 - Move Operator - tables: - replace: true -#### A masked pattern was here #### - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - bucket_count -1 - columns key1,key2,cnt - columns.comments - columns.types int:string:int -#### A masked pattern was here #### - name default.outputtbl2 - serialization.ddl struct outputtbl2 { i32 key1, string key2, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl2 - - Stage: Stage-3 - Stats-Aggr Operator -#### A masked pattern was here #### - -PREHOOK: query: INSERT OVERWRITE TABLE outputTbl2 -SELECT key, val, count(1) FROM T1 GROUP BY key, val -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Output: default@outputtbl2 -POSTHOOK: query: INSERT OVERWRITE TABLE outputTbl2 -SELECT key, val, count(1) FROM T1 GROUP BY key, val -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Output: default@outputtbl2 -POSTHOOK: Lineage: outputtbl2.cnt EXPRESSION [(t1)t1.null, ] -POSTHOOK: Lineage: outputtbl2.key1 EXPRESSION [(t1)t1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: outputtbl2.key2 SIMPLE [(t1)t1.FieldSchema(name:val, type:string, comment:null), ] -PREHOOK: query: SELECT * FROM outputTbl2 -PREHOOK: type: QUERY -PREHOOK: Input: default@outputtbl2 -#### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM outputTbl2 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@outputtbl2 -#### A masked pattern was here #### -1 11 1 -2 12 1 -3 13 1 -7 17 1 -8 18 1 -8 28 1 -PREHOOK: query: -- It should work for sub-queries -EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl1 -SELECT key, count(1) FROM (SELECT key, val FROM T1) subq1 GROUP BY key -PREHOOK: type: QUERY -POSTHOOK: query: -- It should work for sub-queries -EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl1 -SELECT key, count(1) FROM (SELECT key, val FROM T1) subq1 GROUP BY key -POSTHOOK: type: QUERY -ABSTRACT SYNTAX TREE: - -TOK_QUERY - TOK_FROM - TOK_SUBQUERY - TOK_QUERY - TOK_FROM - TOK_TABREF - TOK_TABNAME - T1 - TOK_INSERT - TOK_DESTINATION - TOK_DIR - TOK_TMP_FILE - TOK_SELECT - TOK_SELEXPR - TOK_TABLE_OR_COL - key - TOK_SELEXPR - TOK_TABLE_OR_COL - val - subq1 - TOK_INSERT - TOK_DESTINATION - TOK_TAB - TOK_TABNAME - outputTbl1 - TOK_SELECT - TOK_SELEXPR - TOK_TABLE_OR_COL - key - TOK_SELEXPR - TOK_FUNCTION - count - 1 - TOK_GROUPBY - TOK_TABLE_OR_COL - key - - -STAGE 
DEPENDENCIES: - Stage-1 is a root stage - Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5 - Stage-4 - Stage-0 depends on stages: Stage-4, Stage-3, Stage-6 - Stage-2 depends on stages: Stage-0 - Stage-3 - Stage-5 - Stage-6 depends on stages: Stage-5 - -STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: t1 - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - GatherStats: false - Select Operator - expressions: key (type: string) - outputColumnNames: _col0 - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - Group By Operator - aggregations: count(1) - keys: _col0 (type: string) - mode: final - outputColumnNames: _col0, _col1 - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: UDFToInteger(_col0) (type: int), UDFToInteger(_col1) (type: int) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - GlobalTableId: 1 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE -#### A masked pattern was here #### - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key,cnt - columns.comments - columns.types int:int -#### A masked pattern was here #### - name default.outputtbl1 - numFiles 1 - numRows 5 - rawDataSize 15 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 20 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 - TotalFiles: 1 - GatherStats: true - MultiFileSpray: false - Path -> Alias: -#### A masked pattern was here #### - Path -> Partition: -#### A masked pattern was here #### - Partition - base file name: t1 - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - SORTBUCKETCOLSPREFIX TRUE - bucket_count 2 - bucket_field_name key - columns key,val - columns.comments - columns.types string:string -#### A masked pattern was here #### - name default.t1 - numFiles 1 - numRows 6 - rawDataSize 24 - serialization.ddl struct t1 { string key, string val} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 30 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - SORTBUCKETCOLSPREFIX TRUE - bucket_count 2 - bucket_field_name key - columns key,val - columns.comments - columns.types string:string -#### A masked pattern was here #### - name default.t1 - numFiles 1 - numRows 6 - rawDataSize 24 - serialization.ddl struct t1 { string key, string val} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 30 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.t1 - name: default.t1 - Truncated 
Path -> Alias: - /t1 [subq1:t1] - - Stage: Stage-7 - Conditional Operator - - Stage: Stage-4 - Move Operator - files: - hdfs directory: true -#### A masked pattern was here #### - - Stage: Stage-0 - Move Operator - tables: - replace: true -#### A masked pattern was here #### - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key,cnt - columns.comments - columns.types int:int -#### A masked pattern was here #### - name default.outputtbl1 - numFiles 1 - numRows 5 - rawDataSize 15 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 20 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 - - Stage: Stage-2 - Stats-Aggr Operator -#### A masked pattern was here #### - - Stage: Stage-3 - Map Reduce - Map Operator Tree: - TableScan - GatherStats: false - File Output Operator - compressed: false - GlobalTableId: 0 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key,cnt - columns.comments - columns.types int:int -#### A masked pattern was here #### - name default.outputtbl1 - numFiles 1 - numRows 5 - rawDataSize 15 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 20 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 - TotalFiles: 1 - GatherStats: false - MultiFileSpray: false - Path -> Alias: -#### A masked pattern was here #### - Path -> Partition: -#### A masked pattern was here #### - Partition - base file name: -ext-10001 - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key,cnt - columns.comments - columns.types int:int -#### A masked pattern was here #### - name default.outputtbl1 - numFiles 1 - numRows 5 - rawDataSize 15 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 20 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key,cnt - columns.comments - columns.types int:int -#### A masked pattern was here #### - name default.outputtbl1 - numFiles 1 - numRows 5 - rawDataSize 15 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 20 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 - name: default.outputtbl1 - Truncated Path -> Alias: -#### A masked pattern was here #### - - Stage: Stage-5 - Map Reduce - Map Operator Tree: - TableScan - GatherStats: 
false - File Output Operator - compressed: false - GlobalTableId: 0 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key,cnt - columns.comments - columns.types int:int -#### A masked pattern was here #### - name default.outputtbl1 - numFiles 1 - numRows 5 - rawDataSize 15 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 20 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 - TotalFiles: 1 - GatherStats: false - MultiFileSpray: false - Path -> Alias: -#### A masked pattern was here #### - Path -> Partition: -#### A masked pattern was here #### - Partition - base file name: -ext-10001 - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key,cnt - columns.comments - columns.types int:int -#### A masked pattern was here #### - name default.outputtbl1 - numFiles 1 - numRows 5 - rawDataSize 15 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 20 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key,cnt - columns.comments - columns.types int:int -#### A masked pattern was here #### - name default.outputtbl1 - numFiles 1 - numRows 5 - rawDataSize 15 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 20 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 - name: default.outputtbl1 - Truncated Path -> Alias: -#### A masked pattern was here #### - - Stage: Stage-6 - Move Operator - files: - hdfs directory: true -#### A masked pattern was here #### - -PREHOOK: query: INSERT OVERWRITE TABLE outputTbl1 -SELECT key, count(1) FROM (SELECT key, val FROM T1) subq1 GROUP BY key -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Output: default@outputtbl1 -POSTHOOK: query: INSERT OVERWRITE TABLE outputTbl1 -SELECT key, count(1) FROM (SELECT key, val FROM T1) subq1 GROUP BY key -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Output: default@outputtbl1 -POSTHOOK: Lineage: outputtbl1.cnt EXPRESSION [(t1)t1.null, ] -POSTHOOK: Lineage: outputtbl1.key EXPRESSION [(t1)t1.FieldSchema(name:key, type:string, comment:null), ] -PREHOOK: query: SELECT * FROM outputTbl1 -PREHOOK: type: QUERY -PREHOOK: Input: default@outputtbl1 -#### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM outputTbl1 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@outputtbl1 -#### A masked pattern was here #### -1 1 -2 1 -3 1 -7 1 -8 2 -PREHOOK: query: -- It should work for sub-queries with column aliases -EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl1 -SELECT k, count(1) FROM (SELECT key as k, val as v 
FROM T1) subq1 GROUP BY k -PREHOOK: type: QUERY -POSTHOOK: query: -- It should work for sub-queries with column aliases -EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl1 -SELECT k, count(1) FROM (SELECT key as k, val as v FROM T1) subq1 GROUP BY k -POSTHOOK: type: QUERY -ABSTRACT SYNTAX TREE: - -TOK_QUERY - TOK_FROM - TOK_SUBQUERY - TOK_QUERY - TOK_FROM - TOK_TABREF - TOK_TABNAME - T1 - TOK_INSERT - TOK_DESTINATION - TOK_DIR - TOK_TMP_FILE - TOK_SELECT - TOK_SELEXPR - TOK_TABLE_OR_COL - key - k - TOK_SELEXPR - TOK_TABLE_OR_COL - val - v - subq1 - TOK_INSERT - TOK_DESTINATION - TOK_TAB - TOK_TABNAME - outputTbl1 - TOK_SELECT - TOK_SELEXPR - TOK_TABLE_OR_COL - k - TOK_SELEXPR - TOK_FUNCTION - count - 1 - TOK_GROUPBY - TOK_TABLE_OR_COL - k - - -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5 - Stage-4 - Stage-0 depends on stages: Stage-4, Stage-3, Stage-6 - Stage-2 depends on stages: Stage-0 - Stage-3 - Stage-5 - Stage-6 depends on stages: Stage-5 - -STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: t1 - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - GatherStats: false - Select Operator - expressions: key (type: string) - outputColumnNames: _col0 - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - Group By Operator - aggregations: count(1) - keys: _col0 (type: string) - mode: final - outputColumnNames: _col0, _col1 - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: UDFToInteger(_col0) (type: int), UDFToInteger(_col1) (type: int) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - GlobalTableId: 1 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE -#### A masked pattern was here #### - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key,cnt - columns.comments - columns.types int:int -#### A masked pattern was here #### - name default.outputtbl1 - numFiles 1 - numRows 5 - rawDataSize 15 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 20 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 - TotalFiles: 1 - GatherStats: true - MultiFileSpray: false - Path -> Alias: -#### A masked pattern was here #### - Path -> Partition: -#### A masked pattern was here #### - Partition - base file name: t1 - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - SORTBUCKETCOLSPREFIX TRUE - bucket_count 2 - bucket_field_name key - columns key,val - columns.comments - columns.types string:string -#### A masked pattern was here #### - name default.t1 - numFiles 1 - numRows 6 - rawDataSize 24 - serialization.ddl struct t1 { string key, string val} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 30 -#### A masked pattern was here #### - serde: 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - SORTBUCKETCOLSPREFIX TRUE - bucket_count 2 - bucket_field_name key - columns key,val - columns.comments - columns.types string:string -#### A masked pattern was here #### - name default.t1 - numFiles 1 - numRows 6 - rawDataSize 24 - serialization.ddl struct t1 { string key, string val} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 30 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.t1 - name: default.t1 - Truncated Path -> Alias: - /t1 [subq1:t1] - - Stage: Stage-7 - Conditional Operator - - Stage: Stage-4 - Move Operator - files: - hdfs directory: true -#### A masked pattern was here #### - - Stage: Stage-0 - Move Operator - tables: - replace: true -#### A masked pattern was here #### - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key,cnt - columns.comments - columns.types int:int -#### A masked pattern was here #### - name default.outputtbl1 - numFiles 1 - numRows 5 - rawDataSize 15 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 20 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 - - Stage: Stage-2 - Stats-Aggr Operator -#### A masked pattern was here #### - - Stage: Stage-3 - Map Reduce - Map Operator Tree: - TableScan - GatherStats: false - File Output Operator - compressed: false - GlobalTableId: 0 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key,cnt - columns.comments - columns.types int:int -#### A masked pattern was here #### - name default.outputtbl1 - numFiles 1 - numRows 5 - rawDataSize 15 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 20 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 - TotalFiles: 1 - GatherStats: false - MultiFileSpray: false - Path -> Alias: -#### A masked pattern was here #### - Path -> Partition: -#### A masked pattern was here #### - Partition - base file name: -ext-10001 - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key,cnt - columns.comments - columns.types int:int -#### A masked pattern was here #### - name default.outputtbl1 - numFiles 1 - numRows 5 - rawDataSize 15 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 20 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - input format: 
org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key,cnt - columns.comments - columns.types int:int -#### A masked pattern was here #### - name default.outputtbl1 - numFiles 1 - numRows 5 - rawDataSize 15 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 20 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 - name: default.outputtbl1 - Truncated Path -> Alias: -#### A masked pattern was here #### - - Stage: Stage-5 - Map Reduce - Map Operator Tree: - TableScan - GatherStats: false - File Output Operator - compressed: false - GlobalTableId: 0 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key,cnt - columns.comments - columns.types int:int -#### A masked pattern was here #### - name default.outputtbl1 - numFiles 1 - numRows 5 - rawDataSize 15 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 20 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 - TotalFiles: 1 - GatherStats: false - MultiFileSpray: false - Path -> Alias: -#### A masked pattern was here #### - Path -> Partition: -#### A masked pattern was here #### - Partition - base file name: -ext-10001 - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key,cnt - columns.comments - columns.types int:int -#### A masked pattern was here #### - name default.outputtbl1 - numFiles 1 - numRows 5 - rawDataSize 15 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 20 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key,cnt - columns.comments - columns.types int:int -#### A masked pattern was here #### - name default.outputtbl1 - numFiles 1 - numRows 5 - rawDataSize 15 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 20 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 - name: default.outputtbl1 - Truncated Path -> Alias: -#### A masked pattern was here #### - - Stage: Stage-6 - Move Operator - files: - hdfs directory: true -#### A masked pattern was here #### - -PREHOOK: query: INSERT OVERWRITE TABLE outputTbl1 -SELECT k, count(1) FROM (SELECT key as k, val as v FROM T1) subq1 GROUP BY k -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Output: default@outputtbl1 -POSTHOOK: query: INSERT OVERWRITE TABLE 
outputTbl1 -SELECT k, count(1) FROM (SELECT key as k, val as v FROM T1) subq1 GROUP BY k -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Output: default@outputtbl1 -POSTHOOK: Lineage: outputtbl1.cnt EXPRESSION [(t1)t1.null, ] -POSTHOOK: Lineage: outputtbl1.key EXPRESSION [(t1)t1.FieldSchema(name:key, type:string, comment:null), ] -PREHOOK: query: SELECT * FROM outputTbl1 -PREHOOK: type: QUERY -PREHOOK: Input: default@outputtbl1 -#### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM outputTbl1 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@outputtbl1 -#### A masked pattern was here #### -1 1 -2 1 -3 1 -7 1 -8 2 -PREHOOK: query: CREATE TABLE outputTbl3(key1 int, key2 int, cnt int) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -POSTHOOK: query: CREATE TABLE outputTbl3(key1 int, key2 int, cnt int) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@outputTbl3 -PREHOOK: query: -- The plan should be converted to a map-side group by if the group by key contains a constant followed --- by a match to the sorted key -EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl3 -SELECT 1, key, count(1) FROM T1 GROUP BY 1, key -PREHOOK: type: QUERY -POSTHOOK: query: -- The plan should be converted to a map-side group by if the group by key contains a constant followed --- by a match to the sorted key -EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl3 -SELECT 1, key, count(1) FROM T1 GROUP BY 1, key -POSTHOOK: type: QUERY -ABSTRACT SYNTAX TREE: - -TOK_QUERY - TOK_FROM - TOK_TABREF - TOK_TABNAME - T1 - TOK_INSERT - TOK_DESTINATION - TOK_TAB - TOK_TABNAME - outputTbl3 - TOK_SELECT - TOK_SELEXPR - 1 - TOK_SELEXPR - TOK_TABLE_OR_COL - key - TOK_SELEXPR - TOK_FUNCTION - count - 1 - TOK_GROUPBY - 1 - TOK_TABLE_OR_COL - key - - -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5 - Stage-4 - Stage-0 depends on stages: Stage-4, Stage-3, Stage-6 - Stage-2 depends on stages: Stage-0 - Stage-3 - Stage-5 - Stage-6 depends on stages: Stage-5 - -STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: t1 - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - GatherStats: false - Select Operator - expressions: key (type: string) - outputColumnNames: key - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - Group By Operator - aggregations: count(1) - keys: 1 (type: int), key (type: string) - mode: final - outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: _col0 (type: int), UDFToInteger(_col1) (type: int), UDFToInteger(_col2) (type: int) - outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - GlobalTableId: 1 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE -#### A masked pattern was here #### - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - bucket_count -1 - columns key1,key2,cnt - columns.comments - columns.types int:int:int -#### A masked pattern was here #### - name default.outputtbl3 - serialization.ddl struct outputtbl3 { i32 key1, i32 key2, i32 cnt} - 
serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl3 - TotalFiles: 1 - GatherStats: true - MultiFileSpray: false - Path -> Alias: -#### A masked pattern was here #### - Path -> Partition: -#### A masked pattern was here #### - Partition - base file name: t1 - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - SORTBUCKETCOLSPREFIX TRUE - bucket_count 2 - bucket_field_name key - columns key,val - columns.comments - columns.types string:string -#### A masked pattern was here #### - name default.t1 - numFiles 1 - numRows 6 - rawDataSize 24 - serialization.ddl struct t1 { string key, string val} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 30 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - SORTBUCKETCOLSPREFIX TRUE - bucket_count 2 - bucket_field_name key - columns key,val - columns.comments - columns.types string:string -#### A masked pattern was here #### - name default.t1 - numFiles 1 - numRows 6 - rawDataSize 24 - serialization.ddl struct t1 { string key, string val} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 30 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.t1 - name: default.t1 - Truncated Path -> Alias: - /t1 [t1] - - Stage: Stage-7 - Conditional Operator - - Stage: Stage-4 - Move Operator - files: - hdfs directory: true -#### A masked pattern was here #### - - Stage: Stage-0 - Move Operator - tables: - replace: true -#### A masked pattern was here #### - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - bucket_count -1 - columns key1,key2,cnt - columns.comments - columns.types int:int:int -#### A masked pattern was here #### - name default.outputtbl3 - serialization.ddl struct outputtbl3 { i32 key1, i32 key2, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl3 - - Stage: Stage-2 - Stats-Aggr Operator -#### A masked pattern was here #### - - Stage: Stage-3 - Map Reduce - Map Operator Tree: - TableScan - GatherStats: false - File Output Operator - compressed: false - GlobalTableId: 0 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - bucket_count -1 - columns key1,key2,cnt - columns.comments - columns.types int:int:int -#### A masked pattern was here #### - name default.outputtbl3 - serialization.ddl struct outputtbl3 { i32 key1, i32 key2, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: 
default.outputtbl3 - TotalFiles: 1 - GatherStats: false - MultiFileSpray: false - Path -> Alias: -#### A masked pattern was here #### - Path -> Partition: -#### A masked pattern was here #### - Partition - base file name: -ext-10001 - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - bucket_count -1 - columns key1,key2,cnt - columns.comments - columns.types int:int:int -#### A masked pattern was here #### - name default.outputtbl3 - serialization.ddl struct outputtbl3 { i32 key1, i32 key2, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - bucket_count -1 - columns key1,key2,cnt - columns.comments - columns.types int:int:int -#### A masked pattern was here #### - name default.outputtbl3 - serialization.ddl struct outputtbl3 { i32 key1, i32 key2, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl3 - name: default.outputtbl3 - Truncated Path -> Alias: -#### A masked pattern was here #### - - Stage: Stage-5 - Map Reduce - Map Operator Tree: - TableScan - GatherStats: false - File Output Operator - compressed: false - GlobalTableId: 0 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - bucket_count -1 - columns key1,key2,cnt - columns.comments - columns.types int:int:int -#### A masked pattern was here #### - name default.outputtbl3 - serialization.ddl struct outputtbl3 { i32 key1, i32 key2, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl3 - TotalFiles: 1 - GatherStats: false - MultiFileSpray: false - Path -> Alias: -#### A masked pattern was here #### - Path -> Partition: -#### A masked pattern was here #### - Partition - base file name: -ext-10001 - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - bucket_count -1 - columns key1,key2,cnt - columns.comments - columns.types int:int:int -#### A masked pattern was here #### - name default.outputtbl3 - serialization.ddl struct outputtbl3 { i32 key1, i32 key2, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - bucket_count -1 - columns key1,key2,cnt - columns.comments - columns.types int:int:int -#### A masked pattern was here #### - name default.outputtbl3 - serialization.ddl struct outputtbl3 { i32 key1, i32 key2, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -#### A masked pattern was here 
#### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl3 - name: default.outputtbl3 - Truncated Path -> Alias: -#### A masked pattern was here #### - - Stage: Stage-6 - Move Operator - files: - hdfs directory: true -#### A masked pattern was here #### - -PREHOOK: query: INSERT OVERWRITE TABLE outputTbl3 -SELECT 1, key, count(1) FROM T1 GROUP BY 1, key -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Output: default@outputtbl3 -POSTHOOK: query: INSERT OVERWRITE TABLE outputTbl3 -SELECT 1, key, count(1) FROM T1 GROUP BY 1, key -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Output: default@outputtbl3 -POSTHOOK: Lineage: outputtbl3.cnt EXPRESSION [(t1)t1.null, ] -POSTHOOK: Lineage: outputtbl3.key1 SIMPLE [] -POSTHOOK: Lineage: outputtbl3.key2 EXPRESSION [(t1)t1.FieldSchema(name:key, type:string, comment:null), ] -PREHOOK: query: SELECT * FROM outputTbl3 -PREHOOK: type: QUERY -PREHOOK: Input: default@outputtbl3 -#### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM outputTbl3 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@outputtbl3 -#### A masked pattern was here #### -1 1 1 -1 2 1 -1 3 1 -1 7 1 -1 8 2 -PREHOOK: query: CREATE TABLE outputTbl4(key1 int, key2 int, key3 string, cnt int) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -POSTHOOK: query: CREATE TABLE outputTbl4(key1 int, key2 int, key3 string, cnt int) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@outputTbl4 -PREHOOK: query: -- no map-side group by if the group by key contains a constant followed by another column -EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl4 -SELECT key, 1, val, count(1) FROM T1 GROUP BY key, 1, val -PREHOOK: type: QUERY -POSTHOOK: query: -- no map-side group by if the group by key contains a constant followed by another column -EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl4 -SELECT key, 1, val, count(1) FROM T1 GROUP BY key, 1, val -POSTHOOK: type: QUERY -ABSTRACT SYNTAX TREE: - -TOK_QUERY - TOK_FROM - TOK_TABREF - TOK_TABNAME - T1 - TOK_INSERT - TOK_DESTINATION - TOK_TAB - TOK_TABNAME - outputTbl4 - TOK_SELECT - TOK_SELEXPR - TOK_TABLE_OR_COL - key - TOK_SELEXPR - 1 - TOK_SELEXPR - TOK_TABLE_OR_COL - val - TOK_SELEXPR - TOK_FUNCTION - count - 1 - TOK_GROUPBY - TOK_TABLE_OR_COL - key - 1 - TOK_TABLE_OR_COL - val - - -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-2 depends on stages: Stage-1 - Stage-0 depends on stages: Stage-2 - Stage-3 depends on stages: Stage-0 - -STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: t1 - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - GatherStats: false - Select Operator - expressions: key (type: string), val (type: string) - outputColumnNames: key, val - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - Group By Operator - aggregations: count(1) - keys: key (type: string), 1 (type: int), val (type: string) - mode: hash - outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: _col0 (type: string), _col1 (type: int), _col2 (type: string) - sort order: +++ - Map-reduce partition columns: rand() (type: double) - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - tag: -1 - value expressions: _col3 (type: bigint) - auto parallelism: false - Path -> Alias: -#### A masked pattern was here 
#### - Path -> Partition: -#### A masked pattern was here #### - Partition - base file name: t1 - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - SORTBUCKETCOLSPREFIX TRUE - bucket_count 2 - bucket_field_name key - columns key,val - columns.comments - columns.types string:string -#### A masked pattern was here #### - name default.t1 - numFiles 1 - numRows 6 - rawDataSize 24 - serialization.ddl struct t1 { string key, string val} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 30 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - SORTBUCKETCOLSPREFIX TRUE - bucket_count 2 - bucket_field_name key - columns key,val - columns.comments - columns.types string:string -#### A masked pattern was here #### - name default.t1 - numFiles 1 - numRows 6 - rawDataSize 24 - serialization.ddl struct t1 { string key, string val} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 30 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.t1 - name: default.t1 - Truncated Path -> Alias: - /t1 [t1] - Needs Tagging: false - Reduce Operator Tree: - Group By Operator - aggregations: count(VALUE._col0) - keys: KEY._col0 (type: string), KEY._col1 (type: int), KEY._col2 (type: string) - mode: partials - outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - GlobalTableId: 0 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - properties: - columns _col0,_col1,_col2,_col3 - columns.types string,int,string,bigint - escape.delim \ - serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - TotalFiles: 1 - GatherStats: false - MultiFileSpray: false - - Stage: Stage-2 - Map Reduce - Map Operator Tree: - TableScan - GatherStats: false - Reduce Output Operator - key expressions: _col0 (type: string), _col1 (type: int), _col2 (type: string) - sort order: +++ - Map-reduce partition columns: _col0 (type: string), _col1 (type: int), _col2 (type: string) - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - tag: -1 - value expressions: _col3 (type: bigint) - auto parallelism: false - Path -> Alias: -#### A masked pattern was here #### - Path -> Partition: -#### A masked pattern was here #### - Partition - base file name: -mr-10001 - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - properties: - columns _col0,_col1,_col2,_col3 - columns.types string,int,string,bigint - escape.delim \ - serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - properties: - columns _col0,_col1,_col2,_col3 - columns.types string,int,string,bigint - escape.delim \ - serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - Truncated Path -> Alias: -#### A masked pattern was here #### - Needs Tagging: false - Reduce Operator Tree: - Group By Operator - aggregations: count(VALUE._col0) - keys: KEY._col0 (type: string), KEY._col1 (type: int), KEY._col2 (type: string) - mode: final - outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: UDFToInteger(_col0) (type: int), _col1 (type: int), _col2 (type: string), UDFToInteger(_col3) (type: int) - outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - GlobalTableId: 1 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE -#### A masked pattern was here #### - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - bucket_count -1 - columns key1,key2,key3,cnt - columns.comments - columns.types int:int:string:int -#### A masked pattern was here #### - name default.outputtbl4 - serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl4 - TotalFiles: 1 - GatherStats: true - MultiFileSpray: false - - Stage: Stage-0 - Move Operator - tables: - replace: true -#### A masked pattern was here #### - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - bucket_count -1 - columns key1,key2,key3,cnt - columns.comments - columns.types int:int:string:int -#### A masked pattern was here #### - name default.outputtbl4 - serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl4 - - Stage: Stage-3 - Stats-Aggr Operator -#### A masked pattern was here #### - -PREHOOK: query: INSERT OVERWRITE TABLE outputTbl4 -SELECT key, 1, val, count(1) FROM T1 GROUP BY key, 1, val -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Output: default@outputtbl4 -POSTHOOK: query: INSERT OVERWRITE TABLE outputTbl4 -SELECT key, 1, val, count(1) FROM T1 GROUP BY key, 1, val -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Output: default@outputtbl4 -POSTHOOK: Lineage: outputtbl4.cnt EXPRESSION [(t1)t1.null, ] -POSTHOOK: Lineage: outputtbl4.key1 EXPRESSION [(t1)t1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: outputtbl4.key2 SIMPLE [] -POSTHOOK: Lineage: outputtbl4.key3 SIMPLE [(t1)t1.FieldSchema(name:val, type:string, comment:null), ] -PREHOOK: query: SELECT * FROM outputTbl4 -PREHOOK: type: QUERY -PREHOOK: Input: default@outputtbl4 -#### A masked pattern was here #### -POSTHOOK: 
query: SELECT * FROM outputTbl4 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@outputtbl4 -#### A masked pattern was here #### -1 1 11 1 -2 1 12 1 -3 1 13 1 -7 1 17 1 -8 1 18 1 -8 1 28 1 -PREHOOK: query: -- no map-side group by if the group by key contains a function -EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl3 -SELECT key, key + 1, count(1) FROM T1 GROUP BY key, key + 1 -PREHOOK: type: QUERY -POSTHOOK: query: -- no map-side group by if the group by key contains a function -EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl3 -SELECT key, key + 1, count(1) FROM T1 GROUP BY key, key + 1 -POSTHOOK: type: QUERY -ABSTRACT SYNTAX TREE: - -TOK_QUERY - TOK_FROM - TOK_TABREF - TOK_TABNAME - T1 - TOK_INSERT - TOK_DESTINATION - TOK_TAB - TOK_TABNAME - outputTbl3 - TOK_SELECT - TOK_SELEXPR - TOK_TABLE_OR_COL - key - TOK_SELEXPR - + - TOK_TABLE_OR_COL - key - 1 - TOK_SELEXPR - TOK_FUNCTION - count - 1 - TOK_GROUPBY - TOK_TABLE_OR_COL - key - + - TOK_TABLE_OR_COL - key - 1 - - -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-2 depends on stages: Stage-1 - Stage-0 depends on stages: Stage-2 - Stage-3 depends on stages: Stage-0 - -STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: t1 - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - GatherStats: false - Select Operator - expressions: key (type: string) - outputColumnNames: key - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - Group By Operator - aggregations: count(1) - keys: key (type: string), (key + 1) (type: double) - mode: hash - outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: _col0 (type: string), _col1 (type: double) - sort order: ++ - Map-reduce partition columns: rand() (type: double) - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - tag: -1 - value expressions: _col2 (type: bigint) - auto parallelism: false - Path -> Alias: -#### A masked pattern was here #### - Path -> Partition: -#### A masked pattern was here #### - Partition - base file name: t1 - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - SORTBUCKETCOLSPREFIX TRUE - bucket_count 2 - bucket_field_name key - columns key,val - columns.comments - columns.types string:string -#### A masked pattern was here #### - name default.t1 - numFiles 1 - numRows 6 - rawDataSize 24 - serialization.ddl struct t1 { string key, string val} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 30 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - SORTBUCKETCOLSPREFIX TRUE - bucket_count 2 - bucket_field_name key - columns key,val - columns.comments - columns.types string:string -#### A masked pattern was here #### - name default.t1 - numFiles 1 - numRows 6 - rawDataSize 24 - serialization.ddl struct t1 { string key, string val} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 30 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: 
default.t1 - name: default.t1 - Truncated Path -> Alias: - /t1 [t1] - Needs Tagging: false - Reduce Operator Tree: - Group By Operator - aggregations: count(VALUE._col0) - keys: KEY._col0 (type: string), KEY._col1 (type: double) - mode: partials - outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - GlobalTableId: 0 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - properties: - columns _col0,_col1,_col2 - columns.types string,double,bigint - escape.delim \ - serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - TotalFiles: 1 - GatherStats: false - MultiFileSpray: false - - Stage: Stage-2 - Map Reduce - Map Operator Tree: - TableScan - GatherStats: false - Reduce Output Operator - key expressions: _col0 (type: string), _col1 (type: double) - sort order: ++ - Map-reduce partition columns: _col0 (type: string), _col1 (type: double) - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - tag: -1 - value expressions: _col2 (type: bigint) - auto parallelism: false - Path -> Alias: -#### A masked pattern was here #### - Path -> Partition: -#### A masked pattern was here #### - Partition - base file name: -mr-10001 - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - properties: - columns _col0,_col1,_col2 - columns.types string,double,bigint - escape.delim \ - serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - properties: - columns _col0,_col1,_col2 - columns.types string,double,bigint - escape.delim \ - serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - Truncated Path -> Alias: -#### A masked pattern was here #### - Needs Tagging: false - Reduce Operator Tree: - Group By Operator - aggregations: count(VALUE._col0) - keys: KEY._col0 (type: string), KEY._col1 (type: double) - mode: final - outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: UDFToInteger(_col0) (type: int), UDFToInteger(_col1) (type: int), UDFToInteger(_col2) (type: int) - outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - GlobalTableId: 1 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE -#### A masked pattern was here #### - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key1,key2,cnt - columns.comments - columns.types int:int:int -#### A masked pattern was here #### - name default.outputtbl3 - numFiles 1 - numRows 5 - rawDataSize 25 - serialization.ddl struct outputtbl3 { 
i32 key1, i32 key2, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 30 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl3 - TotalFiles: 1 - GatherStats: true - MultiFileSpray: false - - Stage: Stage-0 - Move Operator - tables: - replace: true -#### A masked pattern was here #### - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key1,key2,cnt - columns.comments - columns.types int:int:int -#### A masked pattern was here #### - name default.outputtbl3 - numFiles 1 - numRows 5 - rawDataSize 25 - serialization.ddl struct outputtbl3 { i32 key1, i32 key2, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 30 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl3 - - Stage: Stage-3 - Stats-Aggr Operator -#### A masked pattern was here #### - -PREHOOK: query: INSERT OVERWRITE TABLE outputTbl3 -SELECT key, key + 1, count(1) FROM T1 GROUP BY key, key + 1 -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Output: default@outputtbl3 -POSTHOOK: query: INSERT OVERWRITE TABLE outputTbl3 -SELECT key, key + 1, count(1) FROM T1 GROUP BY key, key + 1 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Output: default@outputtbl3 -POSTHOOK: Lineage: outputtbl3.cnt EXPRESSION [(t1)t1.null, ] -POSTHOOK: Lineage: outputtbl3.key1 EXPRESSION [(t1)t1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: outputtbl3.key2 EXPRESSION [(t1)t1.FieldSchema(name:key, type:string, comment:null), ] -PREHOOK: query: SELECT * FROM outputTbl3 -PREHOOK: type: QUERY -PREHOOK: Input: default@outputtbl3 -#### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM outputTbl3 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@outputtbl3 -#### A masked pattern was here #### -1 2 1 -2 3 1 -3 4 1 -7 8 1 -8 9 2 -PREHOOK: query: -- it should not matter what follows the group by --- test various cases - --- group by followed by another group by -EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl1 -SELECT key + key, sum(cnt) from -(SELECT key, count(1) as cnt FROM T1 GROUP BY key) subq1 -group by key + key -PREHOOK: type: QUERY -POSTHOOK: query: -- it should not matter what follows the group by --- test various cases - --- group by followed by another group by -EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl1 -SELECT key + key, sum(cnt) from -(SELECT key, count(1) as cnt FROM T1 GROUP BY key) subq1 -group by key + key -POSTHOOK: type: QUERY -ABSTRACT SYNTAX TREE: - -TOK_QUERY - TOK_FROM - TOK_SUBQUERY - TOK_QUERY - TOK_FROM - TOK_TABREF - TOK_TABNAME - T1 - TOK_INSERT - TOK_DESTINATION - TOK_DIR - TOK_TMP_FILE - TOK_SELECT - TOK_SELEXPR - TOK_TABLE_OR_COL - key - TOK_SELEXPR - TOK_FUNCTION - count - 1 - cnt - TOK_GROUPBY - TOK_TABLE_OR_COL - key - subq1 - TOK_INSERT - TOK_DESTINATION - TOK_TAB - TOK_TABNAME - outputTbl1 - TOK_SELECT - TOK_SELEXPR - + - TOK_TABLE_OR_COL - key - TOK_TABLE_OR_COL - key - TOK_SELEXPR - TOK_FUNCTION - sum - TOK_TABLE_OR_COL - cnt - TOK_GROUPBY - + - TOK_TABLE_OR_COL - key - TOK_TABLE_OR_COL - key - - -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-2 depends on stages: Stage-1 - Stage-0 depends on stages: Stage-2 - Stage-3 
depends on stages: Stage-0 - -STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: t1 - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - GatherStats: false - Select Operator - expressions: key (type: string) - outputColumnNames: key - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - Group By Operator - aggregations: count(1) - keys: key (type: string) - mode: final - outputColumnNames: _col0, _col1 - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: _col0 (type: string), _col1 (type: bigint) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE - Group By Operator - aggregations: sum(_col1) - keys: (_col0 + _col0) (type: double) - mode: hash - outputColumnNames: _col0, _col1 - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: _col0 (type: double) - sort order: + - Map-reduce partition columns: rand() (type: double) - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE - tag: -1 - value expressions: _col1 (type: bigint) - auto parallelism: false - Path -> Alias: -#### A masked pattern was here #### - Path -> Partition: -#### A masked pattern was here #### - Partition - base file name: t1 - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - SORTBUCKETCOLSPREFIX TRUE - bucket_count 2 - bucket_field_name key - columns key,val - columns.comments - columns.types string:string -#### A masked pattern was here #### - name default.t1 - numFiles 1 - numRows 6 - rawDataSize 24 - serialization.ddl struct t1 { string key, string val} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 30 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - SORTBUCKETCOLSPREFIX TRUE - bucket_count 2 - bucket_field_name key - columns key,val - columns.comments - columns.types string:string -#### A masked pattern was here #### - name default.t1 - numFiles 1 - numRows 6 - rawDataSize 24 - serialization.ddl struct t1 { string key, string val} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 30 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.t1 - name: default.t1 - Truncated Path -> Alias: - /t1 [subq1:t1] - Needs Tagging: false - Reduce Operator Tree: - Group By Operator - aggregations: sum(VALUE._col0) - keys: KEY._col0 (type: double) - mode: partials - outputColumnNames: _col0, _col1 - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - GlobalTableId: 0 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - properties: - columns _col0,_col1 - columns.types double,bigint - escape.delim \ - serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - 
serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - TotalFiles: 1 - GatherStats: false - MultiFileSpray: false - - Stage: Stage-2 - Map Reduce - Map Operator Tree: - TableScan - GatherStats: false - Reduce Output Operator - key expressions: _col0 (type: double) - sort order: + - Map-reduce partition columns: _col0 (type: double) - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE - tag: -1 - value expressions: _col1 (type: bigint) - auto parallelism: false - Path -> Alias: -#### A masked pattern was here #### - Path -> Partition: -#### A masked pattern was here #### - Partition - base file name: -mr-10001 - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - properties: - columns _col0,_col1 - columns.types double,bigint - escape.delim \ - serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - properties: - columns _col0,_col1 - columns.types double,bigint - escape.delim \ - serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - Truncated Path -> Alias: -#### A masked pattern was here #### - Needs Tagging: false - Reduce Operator Tree: - Group By Operator - aggregations: sum(VALUE._col0) - keys: KEY._col0 (type: double) - mode: final - outputColumnNames: _col0, _col1 - Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: UDFToInteger(_col0) (type: int), UDFToInteger(_col1) (type: int) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - GlobalTableId: 1 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE -#### A masked pattern was here #### - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key,cnt - columns.comments - columns.types int:int -#### A masked pattern was here #### - name default.outputtbl1 - numFiles 1 - numRows 5 - rawDataSize 15 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 20 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 - TotalFiles: 1 - GatherStats: true - MultiFileSpray: false - - Stage: Stage-0 - Move Operator - tables: - replace: true -#### A masked pattern was here #### - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key,cnt - columns.comments - columns.types int:int -#### A masked pattern was here #### - name default.outputtbl1 - numFiles 1 - numRows 5 - rawDataSize 15 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 20 -#### A masked pattern was 
here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 - - Stage: Stage-3 - Stats-Aggr Operator -#### A masked pattern was here #### - -PREHOOK: query: INSERT OVERWRITE TABLE outputTbl1 -SELECT key + key, sum(cnt) from -(SELECT key, count(1) as cnt FROM T1 GROUP BY key) subq1 -group by key + key -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Output: default@outputtbl1 -POSTHOOK: query: INSERT OVERWRITE TABLE outputTbl1 -SELECT key + key, sum(cnt) from -(SELECT key, count(1) as cnt FROM T1 GROUP BY key) subq1 -group by key + key -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Output: default@outputtbl1 -POSTHOOK: Lineage: outputtbl1.cnt EXPRESSION [(t1)t1.null, ] -POSTHOOK: Lineage: outputtbl1.key EXPRESSION [(t1)t1.FieldSchema(name:key, type:string, comment:null), ] -PREHOOK: query: SELECT * FROM outputTbl1 -PREHOOK: type: QUERY -PREHOOK: Input: default@outputtbl1 -#### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM outputTbl1 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@outputtbl1 -#### A masked pattern was here #### -14 1 -16 2 -2 1 -4 1 -6 1 -PREHOOK: query: -- group by followed by a union -EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl1 -SELECT * FROM ( -SELECT key, count(1) FROM T1 GROUP BY key - UNION ALL -SELECT key, count(1) FROM T1 GROUP BY key -) subq1 -PREHOOK: type: QUERY -POSTHOOK: query: -- group by followed by a union -EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl1 -SELECT * FROM ( -SELECT key, count(1) FROM T1 GROUP BY key - UNION ALL -SELECT key, count(1) FROM T1 GROUP BY key -) subq1 -POSTHOOK: type: QUERY -ABSTRACT SYNTAX TREE: - -TOK_QUERY - TOK_FROM - TOK_SUBQUERY - TOK_UNION - TOK_QUERY - TOK_FROM - TOK_TABREF - TOK_TABNAME - T1 - TOK_INSERT - TOK_DESTINATION - TOK_DIR - TOK_TMP_FILE - TOK_SELECT - TOK_SELEXPR - TOK_TABLE_OR_COL - key - TOK_SELEXPR - TOK_FUNCTION - count - 1 - TOK_GROUPBY - TOK_TABLE_OR_COL - key - TOK_QUERY - TOK_FROM - TOK_TABREF - TOK_TABNAME - T1 - TOK_INSERT - TOK_DESTINATION - TOK_DIR - TOK_TMP_FILE - TOK_SELECT - TOK_SELEXPR - TOK_TABLE_OR_COL - key - TOK_SELEXPR - TOK_FUNCTION - count - 1 - TOK_GROUPBY - TOK_TABLE_OR_COL - key - subq1 - TOK_INSERT - TOK_DESTINATION - TOK_TAB - TOK_TABNAME - outputTbl1 - TOK_SELECT - TOK_SELEXPR - TOK_ALLCOLREF - - -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5 - Stage-4 - Stage-0 depends on stages: Stage-4, Stage-3, Stage-6 - Stage-2 depends on stages: Stage-0 - Stage-3 - Stage-5 - Stage-6 depends on stages: Stage-5 - -STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: t1 - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - GatherStats: false - Select Operator - expressions: key (type: string) - outputColumnNames: key - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - Group By Operator - aggregations: count(1) - keys: key (type: string) - mode: final - outputColumnNames: _col0, _col1 - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: _col0 (type: string), _col1 (type: bigint) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE - Union - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: UDFToInteger(_col0) (type: int), UDFToInteger(_col1) (type: int) - 
outputColumnNames: _col0, _col1 - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - GlobalTableId: 1 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE -#### A masked pattern was here #### - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key,cnt - columns.comments - columns.types int:int -#### A masked pattern was here #### - name default.outputtbl1 - numFiles 1 - numRows 5 - rawDataSize 17 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 22 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 - TotalFiles: 1 - GatherStats: true - MultiFileSpray: false - TableScan - alias: t1 - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - GatherStats: false - Select Operator - expressions: key (type: string) - outputColumnNames: key - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - Group By Operator - aggregations: count(1) - keys: key (type: string) - mode: final - outputColumnNames: _col0, _col1 - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: _col0 (type: string), _col1 (type: bigint) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE - Union - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: UDFToInteger(_col0) (type: int), UDFToInteger(_col1) (type: int) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - GlobalTableId: 1 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE -#### A masked pattern was here #### - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key,cnt - columns.comments - columns.types int:int -#### A masked pattern was here #### - name default.outputtbl1 - numFiles 1 - numRows 5 - rawDataSize 17 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 22 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 - TotalFiles: 1 - GatherStats: true - MultiFileSpray: false - Path -> Alias: -#### A masked pattern was here #### - Path -> Partition: -#### A masked pattern was here #### - Partition - base file name: t1 - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - SORTBUCKETCOLSPREFIX TRUE - bucket_count 2 - bucket_field_name key - columns key,val - columns.comments - columns.types string:string -#### A masked pattern was here #### - name default.t1 - numFiles 1 - numRows 
6 - rawDataSize 24 - serialization.ddl struct t1 { string key, string val} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 30 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - SORTBUCKETCOLSPREFIX TRUE - bucket_count 2 - bucket_field_name key - columns key,val - columns.comments - columns.types string:string -#### A masked pattern was here #### - name default.t1 - numFiles 1 - numRows 6 - rawDataSize 24 - serialization.ddl struct t1 { string key, string val} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 30 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.t1 - name: default.t1 - Truncated Path -> Alias: - /t1 [null-subquery1:subq1-subquery1:t1, null-subquery2:subq1-subquery2:t1] - - Stage: Stage-7 - Conditional Operator - - Stage: Stage-4 - Move Operator - files: - hdfs directory: true -#### A masked pattern was here #### - - Stage: Stage-0 - Move Operator - tables: - replace: true -#### A masked pattern was here #### - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key,cnt - columns.comments - columns.types int:int -#### A masked pattern was here #### - name default.outputtbl1 - numFiles 1 - numRows 5 - rawDataSize 17 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 22 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 - - Stage: Stage-2 - Stats-Aggr Operator -#### A masked pattern was here #### - - Stage: Stage-3 - Map Reduce - Map Operator Tree: - TableScan - GatherStats: false - File Output Operator - compressed: false - GlobalTableId: 0 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key,cnt - columns.comments - columns.types int:int -#### A masked pattern was here #### - name default.outputtbl1 - numFiles 1 - numRows 5 - rawDataSize 17 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 22 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 - TotalFiles: 1 - GatherStats: false - MultiFileSpray: false - Path -> Alias: -#### A masked pattern was here #### - Path -> Partition: -#### A masked pattern was here #### - Partition - base file name: -ext-10001 - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key,cnt - columns.comments - columns.types int:int -#### A masked pattern was here #### - name default.outputtbl1 - numFiles 1 - numRows 5 - rawDataSize 17 - serialization.ddl 
struct outputtbl1 { i32 key, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 22 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key,cnt - columns.comments - columns.types int:int -#### A masked pattern was here #### - name default.outputtbl1 - numFiles 1 - numRows 5 - rawDataSize 17 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 22 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 - name: default.outputtbl1 - Truncated Path -> Alias: -#### A masked pattern was here #### - - Stage: Stage-5 - Map Reduce - Map Operator Tree: - TableScan - GatherStats: false - File Output Operator - compressed: false - GlobalTableId: 0 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key,cnt - columns.comments - columns.types int:int -#### A masked pattern was here #### - name default.outputtbl1 - numFiles 1 - numRows 5 - rawDataSize 17 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 22 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 - TotalFiles: 1 - GatherStats: false - MultiFileSpray: false - Path -> Alias: -#### A masked pattern was here #### - Path -> Partition: -#### A masked pattern was here #### - Partition - base file name: -ext-10001 - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key,cnt - columns.comments - columns.types int:int -#### A masked pattern was here #### - name default.outputtbl1 - numFiles 1 - numRows 5 - rawDataSize 17 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 22 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key,cnt - columns.comments - columns.types int:int -#### A masked pattern was here #### - name default.outputtbl1 - numFiles 1 - numRows 5 - rawDataSize 17 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 22 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 - name: default.outputtbl1 - Truncated Path -> Alias: -#### A masked pattern was here #### - - Stage: Stage-6 - Move Operator - files: - hdfs directory: true -#### A masked pattern was here #### 
- -PREHOOK: query: INSERT OVERWRITE TABLE outputTbl1 -SELECT * FROM ( -SELECT key, count(1) FROM T1 GROUP BY key - UNION ALL -SELECT key, count(1) FROM T1 GROUP BY key -) subq1 -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Output: default@outputtbl1 -POSTHOOK: query: INSERT OVERWRITE TABLE outputTbl1 -SELECT * FROM ( -SELECT key, count(1) FROM T1 GROUP BY key - UNION ALL -SELECT key, count(1) FROM T1 GROUP BY key -) subq1 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Output: default@outputtbl1 -POSTHOOK: Lineage: outputtbl1.cnt EXPRESSION [(t1)t1.null, (t1)t1.null, ] -POSTHOOK: Lineage: outputtbl1.key EXPRESSION [(t1)t1.FieldSchema(name:key, type:string, comment:null), (t1)t1.FieldSchema(name:key, type:string, comment:null), ] -PREHOOK: query: SELECT * FROM outputTbl1 -PREHOOK: type: QUERY -PREHOOK: Input: default@outputtbl1 -#### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM outputTbl1 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@outputtbl1 -#### A masked pattern was here #### -1 1 -1 1 -2 1 -2 1 -3 1 -3 1 -7 1 -7 1 -8 2 -8 2 -PREHOOK: query: -- group by followed by a union where one of the sub-queries is map-side group by -EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl1 -SELECT * FROM ( -SELECT key, count(1) FROM T1 GROUP BY key - UNION ALL -SELECT key + key as key, count(1) FROM T1 GROUP BY key + key -) subq1 -PREHOOK: type: QUERY -POSTHOOK: query: -- group by followed by a union where one of the sub-queries is map-side group by -EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl1 -SELECT * FROM ( -SELECT key, count(1) FROM T1 GROUP BY key - UNION ALL -SELECT key + key as key, count(1) FROM T1 GROUP BY key + key -) subq1 -POSTHOOK: type: QUERY -ABSTRACT SYNTAX TREE: - -TOK_QUERY - TOK_FROM - TOK_SUBQUERY - TOK_UNION - TOK_QUERY - TOK_FROM - TOK_TABREF - TOK_TABNAME - T1 - TOK_INSERT - TOK_DESTINATION - TOK_DIR - TOK_TMP_FILE - TOK_SELECT - TOK_SELEXPR - TOK_TABLE_OR_COL - key - TOK_SELEXPR - TOK_FUNCTION - count - 1 - TOK_GROUPBY - TOK_TABLE_OR_COL - key - TOK_QUERY - TOK_FROM - TOK_TABREF - TOK_TABNAME - T1 - TOK_INSERT - TOK_DESTINATION - TOK_DIR - TOK_TMP_FILE - TOK_SELECT - TOK_SELEXPR - + - TOK_TABLE_OR_COL - key - TOK_TABLE_OR_COL - key - key - TOK_SELEXPR - TOK_FUNCTION - count - 1 - TOK_GROUPBY - + - TOK_TABLE_OR_COL - key - TOK_TABLE_OR_COL - key - subq1 - TOK_INSERT - TOK_DESTINATION - TOK_TAB - TOK_TABNAME - outputTbl1 - TOK_SELECT - TOK_SELEXPR - TOK_ALLCOLREF - - -STAGE DEPENDENCIES: - Stage-9 is a root stage - Stage-10 depends on stages: Stage-9 - Stage-2 depends on stages: Stage-10 - Stage-8 depends on stages: Stage-2 , consists of Stage-5, Stage-4, Stage-6 - Stage-5 - Stage-0 depends on stages: Stage-5, Stage-4, Stage-7 - Stage-3 depends on stages: Stage-0 - Stage-4 - Stage-6 - Stage-7 depends on stages: Stage-6 - -STAGE PLANS: - Stage: Stage-9 - Map Reduce - Map Operator Tree: - TableScan - alias: t1 - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - GatherStats: false - Select Operator - expressions: key (type: string) - outputColumnNames: key - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - Group By Operator - aggregations: count(1) - keys: (key + key) (type: double) - mode: hash - outputColumnNames: _col0, _col1 - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: _col0 (type: double) - sort order: + - Map-reduce partition columns: rand() (type: double) - Statistics: Num rows: 
6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - tag: -1 - value expressions: _col1 (type: bigint) - auto parallelism: false - Path -> Alias: -#### A masked pattern was here #### - Path -> Partition: -#### A masked pattern was here #### - Partition - base file name: t1 - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - SORTBUCKETCOLSPREFIX TRUE - bucket_count 2 - bucket_field_name key - columns key,val - columns.comments - columns.types string:string -#### A masked pattern was here #### - name default.t1 - numFiles 1 - numRows 6 - rawDataSize 24 - serialization.ddl struct t1 { string key, string val} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 30 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - SORTBUCKETCOLSPREFIX TRUE - bucket_count 2 - bucket_field_name key - columns key,val - columns.comments - columns.types string:string -#### A masked pattern was here #### - name default.t1 - numFiles 1 - numRows 6 - rawDataSize 24 - serialization.ddl struct t1 { string key, string val} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 30 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.t1 - name: default.t1 - Truncated Path -> Alias: - /t1 [null-subquery2:subq1-subquery2:t1] - Needs Tagging: false - Reduce Operator Tree: - Group By Operator - aggregations: count(VALUE._col0) - keys: KEY._col0 (type: double) - mode: partials - outputColumnNames: _col0, _col1 - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - GlobalTableId: 0 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - properties: - columns _col0,_col1 - columns.types double,bigint - escape.delim \ - serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - TotalFiles: 1 - GatherStats: false - MultiFileSpray: false - - Stage: Stage-10 - Map Reduce - Map Operator Tree: - TableScan - GatherStats: false - Reduce Output Operator - key expressions: _col0 (type: double) - sort order: + - Map-reduce partition columns: _col0 (type: double) - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - tag: -1 - value expressions: _col1 (type: bigint) - auto parallelism: false - Path -> Alias: -#### A masked pattern was here #### - Path -> Partition: -#### A masked pattern was here #### - Partition - base file name: -mr-10002 - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - properties: - columns _col0,_col1 - columns.types double,bigint - escape.delim \ - serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - properties: - columns _col0,_col1 - columns.types double,bigint - escape.delim \ - serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - Truncated Path -> Alias: -#### A masked pattern was here #### - Needs Tagging: false - Reduce Operator Tree: - Group By Operator - aggregations: count(VALUE._col0) - keys: KEY._col0 (type: double) - mode: final - outputColumnNames: _col0, _col1 - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: _col0 (type: double), _col1 (type: bigint) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - GlobalTableId: 0 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - properties: - columns _col0,_col1 - columns.types double,bigint - escape.delim \ - serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - TotalFiles: 1 - GatherStats: false - MultiFileSpray: false - - Stage: Stage-2 - Map Reduce - Map Operator Tree: - TableScan - alias: t1 - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - GatherStats: false - Select Operator - expressions: key (type: string) - outputColumnNames: key - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - Group By Operator - aggregations: count(1) - keys: key (type: string) - mode: final - outputColumnNames: _col0, _col1 - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: UDFToDouble(_col0) (type: double), _col1 (type: bigint) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE - Union - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: UDFToInteger(_col0) (type: int), UDFToInteger(_col1) (type: int) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - GlobalTableId: 1 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE -#### A masked pattern was here #### - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key,cnt - columns.comments - columns.types int:int -#### A masked pattern was here #### - name default.outputtbl1 - numFiles 1 - numRows 10 - rawDataSize 30 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 40 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 - TotalFiles: 1 - GatherStats: true - MultiFileSpray: false - TableScan - GatherStats: false - Union - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: UDFToInteger(_col0) (type: 
int), UDFToInteger(_col1) (type: int) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - GlobalTableId: 1 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE -#### A masked pattern was here #### - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key,cnt - columns.comments - columns.types int:int -#### A masked pattern was here #### - name default.outputtbl1 - numFiles 1 - numRows 10 - rawDataSize 30 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 40 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 - TotalFiles: 1 - GatherStats: true - MultiFileSpray: false - Path -> Alias: -#### A masked pattern was here #### - Path -> Partition: -#### A masked pattern was here #### - Partition - base file name: -mr-10003 - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - properties: - columns _col0,_col1 - columns.types double,bigint - escape.delim \ - serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - properties: - columns _col0,_col1 - columns.types double,bigint - escape.delim \ - serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe -#### A masked pattern was here #### - Partition - base file name: t1 - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - SORTBUCKETCOLSPREFIX TRUE - bucket_count 2 - bucket_field_name key - columns key,val - columns.comments - columns.types string:string -#### A masked pattern was here #### - name default.t1 - numFiles 1 - numRows 6 - rawDataSize 24 - serialization.ddl struct t1 { string key, string val} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 30 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - SORTBUCKETCOLSPREFIX TRUE - bucket_count 2 - bucket_field_name key - columns key,val - columns.comments - columns.types string:string -#### A masked pattern was here #### - name default.t1 - numFiles 1 - numRows 6 - rawDataSize 24 - serialization.ddl struct t1 { string key, string val} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 30 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.t1 - name: default.t1 - Truncated Path -> Alias: - /t1 [null-subquery1:subq1-subquery1:t1] -#### A masked pattern 
was here #### - - Stage: Stage-8 - Conditional Operator - - Stage: Stage-5 - Move Operator - files: - hdfs directory: true -#### A masked pattern was here #### - - Stage: Stage-0 - Move Operator - tables: - replace: true -#### A masked pattern was here #### - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key,cnt - columns.comments - columns.types int:int -#### A masked pattern was here #### - name default.outputtbl1 - numFiles 1 - numRows 10 - rawDataSize 30 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 40 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 - - Stage: Stage-3 - Stats-Aggr Operator -#### A masked pattern was here #### - - Stage: Stage-4 - Map Reduce - Map Operator Tree: - TableScan - GatherStats: false - File Output Operator - compressed: false - GlobalTableId: 0 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key,cnt - columns.comments - columns.types int:int -#### A masked pattern was here #### - name default.outputtbl1 - numFiles 1 - numRows 10 - rawDataSize 30 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 40 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 - TotalFiles: 1 - GatherStats: false - MultiFileSpray: false - Path -> Alias: -#### A masked pattern was here #### - Path -> Partition: -#### A masked pattern was here #### - Partition - base file name: -ext-10001 - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key,cnt - columns.comments - columns.types int:int -#### A masked pattern was here #### - name default.outputtbl1 - numFiles 1 - numRows 10 - rawDataSize 30 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 40 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key,cnt - columns.comments - columns.types int:int -#### A masked pattern was here #### - name default.outputtbl1 - numFiles 1 - numRows 10 - rawDataSize 30 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 40 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 - name: default.outputtbl1 - Truncated Path -> Alias: -#### A masked pattern was here #### - - Stage: Stage-6 - Map Reduce - Map Operator Tree: - TableScan - GatherStats: false - File 
Output Operator - compressed: false - GlobalTableId: 0 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key,cnt - columns.comments - columns.types int:int -#### A masked pattern was here #### - name default.outputtbl1 - numFiles 1 - numRows 10 - rawDataSize 30 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 40 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 - TotalFiles: 1 - GatherStats: false - MultiFileSpray: false - Path -> Alias: -#### A masked pattern was here #### - Path -> Partition: -#### A masked pattern was here #### - Partition - base file name: -ext-10001 - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key,cnt - columns.comments - columns.types int:int -#### A masked pattern was here #### - name default.outputtbl1 - numFiles 1 - numRows 10 - rawDataSize 30 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 40 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key,cnt - columns.comments - columns.types int:int -#### A masked pattern was here #### - name default.outputtbl1 - numFiles 1 - numRows 10 - rawDataSize 30 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 40 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 - name: default.outputtbl1 - Truncated Path -> Alias: -#### A masked pattern was here #### - - Stage: Stage-7 - Move Operator - files: - hdfs directory: true -#### A masked pattern was here #### - -PREHOOK: query: INSERT OVERWRITE TABLE outputTbl1 -SELECT * FROM ( -SELECT key, count(1) as cnt FROM T1 GROUP BY key - UNION ALL -SELECT key + key as key, count(1) as cnt FROM T1 GROUP BY key + key -) subq1 -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Output: default@outputtbl1 -POSTHOOK: query: INSERT OVERWRITE TABLE outputTbl1 -SELECT * FROM ( -SELECT key, count(1) as cnt FROM T1 GROUP BY key - UNION ALL -SELECT key + key as key, count(1) as cnt FROM T1 GROUP BY key + key -) subq1 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Output: default@outputtbl1 -POSTHOOK: Lineage: outputtbl1.cnt EXPRESSION [(t1)t1.null, (t1)t1.null, ] -POSTHOOK: Lineage: outputtbl1.key EXPRESSION [(t1)t1.FieldSchema(name:key, type:string, comment:null), (t1)t1.FieldSchema(name:key, type:string, comment:null), ] -PREHOOK: query: SELECT * FROM outputTbl1 -PREHOOK: type: QUERY -PREHOOK: Input: default@outputtbl1 -#### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM outputTbl1 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@outputtbl1 -#### A 
masked pattern was here #### -1 1 -14 1 -16 2 -2 1 -2 1 -3 1 -4 1 -6 1 -7 1 -8 2 -PREHOOK: query: -- group by followed by a join -EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl1 -SELECT subq1.key, subq1.cnt+subq2.cnt FROM -(SELECT key, count(1) as cnt FROM T1 GROUP BY key) subq1 -JOIN -(SELECT key, count(1) as cnt FROM T1 GROUP BY key) subq2 -ON subq1.key = subq2.key -PREHOOK: type: QUERY -POSTHOOK: query: -- group by followed by a join -EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl1 -SELECT subq1.key, subq1.cnt+subq2.cnt FROM -(SELECT key, count(1) as cnt FROM T1 GROUP BY key) subq1 -JOIN -(SELECT key, count(1) as cnt FROM T1 GROUP BY key) subq2 -ON subq1.key = subq2.key -POSTHOOK: type: QUERY -ABSTRACT SYNTAX TREE: - -TOK_QUERY - TOK_FROM - TOK_JOIN - TOK_SUBQUERY - TOK_QUERY - TOK_FROM - TOK_TABREF - TOK_TABNAME - T1 - TOK_INSERT - TOK_DESTINATION - TOK_DIR - TOK_TMP_FILE - TOK_SELECT - TOK_SELEXPR - TOK_TABLE_OR_COL - key - TOK_SELEXPR - TOK_FUNCTION - count - 1 - cnt - TOK_GROUPBY - TOK_TABLE_OR_COL - key - subq1 - TOK_SUBQUERY - TOK_QUERY - TOK_FROM - TOK_TABREF - TOK_TABNAME - T1 - TOK_INSERT - TOK_DESTINATION - TOK_DIR - TOK_TMP_FILE - TOK_SELECT - TOK_SELEXPR - TOK_TABLE_OR_COL - key - TOK_SELEXPR - TOK_FUNCTION - count - 1 - cnt - TOK_GROUPBY - TOK_TABLE_OR_COL - key - subq2 - = - . - TOK_TABLE_OR_COL - subq1 - key - . - TOK_TABLE_OR_COL - subq2 - key - TOK_INSERT - TOK_DESTINATION - TOK_TAB - TOK_TABNAME - outputTbl1 - TOK_SELECT - TOK_SELEXPR - . - TOK_TABLE_OR_COL - subq1 - key - TOK_SELEXPR - + - . - TOK_TABLE_OR_COL - subq1 - cnt - . - TOK_TABLE_OR_COL - subq2 - cnt - - -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - Stage-2 depends on stages: Stage-0 - -STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: t1 - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - GatherStats: false - Filter Operator - isSamplingPred: false - predicate: key is not null (type: boolean) - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: key (type: string) - outputColumnNames: key - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE - Group By Operator - aggregations: count(1) - keys: key (type: string) - mode: final - outputColumnNames: _col0, _col1 - Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: _col0 (type: string), _col1 (type: bigint) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: _col0 (type: string) - sort order: + - Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE - tag: 0 - value expressions: _col1 (type: bigint) - auto parallelism: false - TableScan - alias: t1 - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - GatherStats: false - Filter Operator - isSamplingPred: false - predicate: key is not null (type: boolean) - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: key (type: string) - outputColumnNames: key - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE - Group By Operator - aggregations: count(1) - keys: key (type: string) - mode: final - outputColumnNames: _col0, _col1 - Statistics: Num rows: 1 Data size: 4 
Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: _col0 (type: string), _col1 (type: bigint) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: _col0 (type: string) - sort order: + - Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE - tag: 1 - value expressions: _col1 (type: bigint) - auto parallelism: false - Path -> Alias: -#### A masked pattern was here #### - Path -> Partition: -#### A masked pattern was here #### - Partition - base file name: t1 - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - SORTBUCKETCOLSPREFIX TRUE - bucket_count 2 - bucket_field_name key - columns key,val - columns.comments - columns.types string:string -#### A masked pattern was here #### - name default.t1 - numFiles 1 - numRows 6 - rawDataSize 24 - serialization.ddl struct t1 { string key, string val} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 30 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - SORTBUCKETCOLSPREFIX TRUE - bucket_count 2 - bucket_field_name key - columns key,val - columns.comments - columns.types string:string -#### A masked pattern was here #### - name default.t1 - numFiles 1 - numRows 6 - rawDataSize 24 - serialization.ddl struct t1 { string key, string val} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 30 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.t1 - name: default.t1 - Truncated Path -> Alias: - /t1 [subq1:t1, subq2:t1] - Needs Tagging: true - Reduce Operator Tree: - Join Operator - condition map: - Inner Join 0 to 1 - condition expressions: - 0 {KEY.reducesinkkey0} {VALUE._col0} - 1 {VALUE._col0} - outputColumnNames: _col0, _col1, _col3 - Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: UDFToInteger(_col0) (type: int), UDFToInteger((_col1 + _col3)) (type: int) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - GlobalTableId: 1 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE -#### A masked pattern was here #### - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key,cnt - columns.comments - columns.types int:int -#### A masked pattern was here #### - name default.outputtbl1 - numFiles 1 - numRows 10 - rawDataSize 32 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 42 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 - TotalFiles: 1 - 
GatherStats: true - MultiFileSpray: false - - Stage: Stage-0 - Move Operator - tables: - replace: true -#### A masked pattern was here #### - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key,cnt - columns.comments - columns.types int:int -#### A masked pattern was here #### - name default.outputtbl1 - numFiles 1 - numRows 10 - rawDataSize 32 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 42 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 - - Stage: Stage-2 - Stats-Aggr Operator -#### A masked pattern was here #### - -PREHOOK: query: INSERT OVERWRITE TABLE outputTbl1 -SELECT subq1.key, subq1.cnt+subq2.cnt FROM -(SELECT key, count(1) as cnt FROM T1 GROUP BY key) subq1 -JOIN -(SELECT key, count(1) as cnt FROM T1 GROUP BY key) subq2 -ON subq1.key = subq2.key -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Output: default@outputtbl1 -POSTHOOK: query: INSERT OVERWRITE TABLE outputTbl1 -SELECT subq1.key, subq1.cnt+subq2.cnt FROM -(SELECT key, count(1) as cnt FROM T1 GROUP BY key) subq1 -JOIN -(SELECT key, count(1) as cnt FROM T1 GROUP BY key) subq2 -ON subq1.key = subq2.key -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Output: default@outputtbl1 -POSTHOOK: Lineage: outputtbl1.cnt EXPRESSION [(t1)t1.null, (t1)t1.null, ] -POSTHOOK: Lineage: outputtbl1.key EXPRESSION [(t1)t1.FieldSchema(name:key, type:string, comment:null), ] -PREHOOK: query: SELECT * FROM outputTbl1 -PREHOOK: type: QUERY -PREHOOK: Input: default@outputtbl1 -#### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM outputTbl1 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@outputtbl1 -#### A masked pattern was here #### -1 2 -2 2 -3 2 -7 2 -8 4 -PREHOOK: query: -- group by followed by a join where one of the sub-queries can be performed in the mapper -EXPLAIN EXTENDED -SELECT * FROM -(SELECT key, count(1) FROM T1 GROUP BY key) subq1 -JOIN -(SELECT key, val, count(1) FROM T1 GROUP BY key, val) subq2 -ON subq1.key = subq2.key -PREHOOK: type: QUERY -POSTHOOK: query: -- group by followed by a join where one of the sub-queries can be performed in the mapper -EXPLAIN EXTENDED -SELECT * FROM -(SELECT key, count(1) FROM T1 GROUP BY key) subq1 -JOIN -(SELECT key, val, count(1) FROM T1 GROUP BY key, val) subq2 -ON subq1.key = subq2.key -POSTHOOK: type: QUERY -ABSTRACT SYNTAX TREE: - -TOK_QUERY - TOK_FROM - TOK_JOIN - TOK_SUBQUERY - TOK_QUERY - TOK_FROM - TOK_TABREF - TOK_TABNAME - T1 - TOK_INSERT - TOK_DESTINATION - TOK_DIR - TOK_TMP_FILE - TOK_SELECT - TOK_SELEXPR - TOK_TABLE_OR_COL - key - TOK_SELEXPR - TOK_FUNCTION - count - 1 - TOK_GROUPBY - TOK_TABLE_OR_COL - key - subq1 - TOK_SUBQUERY - TOK_QUERY - TOK_FROM - TOK_TABREF - TOK_TABNAME - T1 - TOK_INSERT - TOK_DESTINATION - TOK_DIR - TOK_TMP_FILE - TOK_SELECT - TOK_SELEXPR - TOK_TABLE_OR_COL - key - TOK_SELEXPR - TOK_TABLE_OR_COL - val - TOK_SELEXPR - TOK_FUNCTION - count - 1 - TOK_GROUPBY - TOK_TABLE_OR_COL - key - TOK_TABLE_OR_COL - val - subq2 - = - . - TOK_TABLE_OR_COL - subq1 - key - . 
- TOK_TABLE_OR_COL - subq2 - key - TOK_INSERT - TOK_DESTINATION - TOK_DIR - TOK_TMP_FILE - TOK_SELECT - TOK_SELEXPR - TOK_ALLCOLREF - - -STAGE DEPENDENCIES: - Stage-2 is a root stage - Stage-3 depends on stages: Stage-2 - Stage-1 depends on stages: Stage-3 - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-2 - Map Reduce - Map Operator Tree: - TableScan - alias: t1 - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - GatherStats: false - Filter Operator - isSamplingPred: false - predicate: key is not null (type: boolean) - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: key (type: string), val (type: string) - outputColumnNames: key, val - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE - Group By Operator - aggregations: count(1) - keys: key (type: string), val (type: string) - mode: hash - outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: _col0 (type: string), _col1 (type: string) - sort order: ++ - Map-reduce partition columns: rand() (type: double) - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE - tag: -1 - value expressions: _col2 (type: bigint) - auto parallelism: false - Path -> Alias: -#### A masked pattern was here #### - Path -> Partition: -#### A masked pattern was here #### - Partition - base file name: t1 - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - SORTBUCKETCOLSPREFIX TRUE - bucket_count 2 - bucket_field_name key - columns key,val - columns.comments - columns.types string:string -#### A masked pattern was here #### - name default.t1 - numFiles 1 - numRows 6 - rawDataSize 24 - serialization.ddl struct t1 { string key, string val} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 30 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - SORTBUCKETCOLSPREFIX TRUE - bucket_count 2 - bucket_field_name key - columns key,val - columns.comments - columns.types string:string -#### A masked pattern was here #### - name default.t1 - numFiles 1 - numRows 6 - rawDataSize 24 - serialization.ddl struct t1 { string key, string val} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 30 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.t1 - name: default.t1 - Truncated Path -> Alias: - /t1 [subq2:t1] - Needs Tagging: false - Reduce Operator Tree: - Group By Operator - aggregations: count(VALUE._col0) - keys: KEY._col0 (type: string), KEY._col1 (type: string) - mode: partials - outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - GlobalTableId: 0 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - properties: - columns 
_col0,_col1,_col2 - columns.types string,string,bigint - escape.delim \ - serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - TotalFiles: 1 - GatherStats: false - MultiFileSpray: false - - Stage: Stage-3 - Map Reduce - Map Operator Tree: - TableScan - GatherStats: false - Reduce Output Operator - key expressions: _col0 (type: string), _col1 (type: string) - sort order: ++ - Map-reduce partition columns: _col0 (type: string), _col1 (type: string) - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE - tag: -1 - value expressions: _col2 (type: bigint) - auto parallelism: false - Path -> Alias: -#### A masked pattern was here #### - Path -> Partition: -#### A masked pattern was here #### - Partition - base file name: -mr-10002 - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - properties: - columns _col0,_col1,_col2 - columns.types string,string,bigint - escape.delim \ - serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - properties: - columns _col0,_col1,_col2 - columns.types string,string,bigint - escape.delim \ - serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - Truncated Path -> Alias: -#### A masked pattern was here #### - Needs Tagging: false - Reduce Operator Tree: - Group By Operator - aggregations: count(VALUE._col0) - keys: KEY._col0 (type: string), KEY._col1 (type: string) - mode: final - outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col2 (type: bigint) - outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - GlobalTableId: 0 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - properties: - columns _col0,_col1,_col2 - columns.types string,string,bigint - escape.delim \ - serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - TotalFiles: 1 - GatherStats: false - MultiFileSpray: false - - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: t1 - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - GatherStats: false - Filter Operator - isSamplingPred: false - predicate: key is not null (type: boolean) - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: key (type: string) - outputColumnNames: key - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE - Group By Operator - aggregations: count(1) - keys: key (type: string) - mode: final - outputColumnNames: _col0, _col1 - Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: _col0 (type: string), _col1 (type: bigint) - 
outputColumnNames: _col0, _col1 - Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: _col0 (type: string) - sort order: + - Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE - tag: 0 - value expressions: _col1 (type: bigint) - auto parallelism: false - TableScan - GatherStats: false - Reduce Output Operator - key expressions: _col0 (type: string) - sort order: + - Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE - tag: 1 - value expressions: _col1 (type: string), _col2 (type: bigint) - auto parallelism: false - Path -> Alias: -#### A masked pattern was here #### - Path -> Partition: -#### A masked pattern was here #### - Partition - base file name: -mr-10003 - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - properties: - columns _col0,_col1,_col2 - columns.types string,string,bigint - escape.delim \ - serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - properties: - columns _col0,_col1,_col2 - columns.types string,string,bigint - escape.delim \ - serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe -#### A masked pattern was here #### - Partition - base file name: t1 - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - SORTBUCKETCOLSPREFIX TRUE - bucket_count 2 - bucket_field_name key - columns key,val - columns.comments - columns.types string:string -#### A masked pattern was here #### - name default.t1 - numFiles 1 - numRows 6 - rawDataSize 24 - serialization.ddl struct t1 { string key, string val} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 30 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - SORTBUCKETCOLSPREFIX TRUE - bucket_count 2 - bucket_field_name key - columns key,val - columns.comments - columns.types string:string -#### A masked pattern was here #### - name default.t1 - numFiles 1 - numRows 6 - rawDataSize 24 - serialization.ddl struct t1 { string key, string val} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 30 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.t1 - name: default.t1 - Truncated Path -> Alias: - /t1 [subq1:t1] -#### A masked pattern was here #### - Needs Tagging: true - Reduce Operator Tree: - Join Operator - condition map: - Inner Join 0 to 1 - condition expressions: - 0 {KEY.reducesinkkey0} {VALUE._col0} - 1 {KEY.reducesinkkey0} {VALUE._col0} {VALUE._col1} - outputColumnNames: _col0, _col1, _col2, _col3, _col4 - Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE - 
Select Operator - expressions: _col0 (type: string), _col1 (type: bigint), _col2 (type: string), _col3 (type: string), _col4 (type: bigint) - outputColumnNames: _col0, _col1, _col2, _col3, _col4 - Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - GlobalTableId: 0 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE -#### A masked pattern was here #### - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - columns _col0,_col1,_col2,_col3,_col4 - columns.types string:bigint:string:string:bigint - escape.delim \ - hive.serialization.extend.additional.nesting.levels true - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - TotalFiles: 1 - GatherStats: false - MultiFileSpray: false - - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - ListSink - -PREHOOK: query: CREATE TABLE T2(key STRING, val STRING) -CLUSTERED BY (key, val) SORTED BY (key, val) INTO 2 BUCKETS STORED AS TEXTFILE -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -POSTHOOK: query: CREATE TABLE T2(key STRING, val STRING) -CLUSTERED BY (key, val) SORTED BY (key, val) INTO 2 BUCKETS STORED AS TEXTFILE -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@T2 -PREHOOK: query: -- perform an insert to make sure there are 2 files -INSERT OVERWRITE TABLE T2 select key, val from T1 -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Output: default@t2 -POSTHOOK: query: -- perform an insert to make sure there are 2 files -INSERT OVERWRITE TABLE T2 select key, val from T1 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Output: default@t2 -POSTHOOK: Lineage: t2.key SIMPLE [(t1)t1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: t2.val SIMPLE [(t1)t1.FieldSchema(name:val, type:string, comment:null), ] -PREHOOK: query: -- no mapside sort group by if the group by is a prefix of the sorted key -EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl1 -SELECT key, count(1) FROM T2 GROUP BY key -PREHOOK: type: QUERY -POSTHOOK: query: -- no mapside sort group by if the group by is a prefix of the sorted key -EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl1 -SELECT key, count(1) FROM T2 GROUP BY key -POSTHOOK: type: QUERY -ABSTRACT SYNTAX TREE: - -TOK_QUERY - TOK_FROM - TOK_TABREF - TOK_TABNAME - T2 - TOK_INSERT - TOK_DESTINATION - TOK_TAB - TOK_TABNAME - outputTbl1 - TOK_SELECT - TOK_SELEXPR - TOK_TABLE_OR_COL - key - TOK_SELEXPR - TOK_FUNCTION - count - 1 - TOK_GROUPBY - TOK_TABLE_OR_COL - key - - -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-2 depends on stages: Stage-1 - Stage-0 depends on stages: Stage-2 - Stage-3 depends on stages: Stage-0 - -STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: t2 - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - GatherStats: false - Select Operator - expressions: key (type: string) - outputColumnNames: key - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - Group By Operator - aggregations: count(1) - bucketGroup: true - keys: key (type: string) - mode: hash - outputColumnNames: _col0, _col1 - Statistics: Num rows: 6 Data size: 24 Basic 
stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: _col0 (type: string) - sort order: + - Map-reduce partition columns: rand() (type: double) - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - tag: -1 - value expressions: _col1 (type: bigint) - auto parallelism: false - Path -> Alias: -#### A masked pattern was here #### - Path -> Partition: -#### A masked pattern was here #### - Partition - base file name: t2 - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - SORTBUCKETCOLSPREFIX TRUE - bucket_count 2 - bucket_field_name key - columns key,val - columns.comments - columns.types string:string -#### A masked pattern was here #### - name default.t2 - numFiles 1 - numRows 6 - rawDataSize 24 - serialization.ddl struct t2 { string key, string val} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 30 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - SORTBUCKETCOLSPREFIX TRUE - bucket_count 2 - bucket_field_name key - columns key,val - columns.comments - columns.types string:string -#### A masked pattern was here #### - name default.t2 - numFiles 1 - numRows 6 - rawDataSize 24 - serialization.ddl struct t2 { string key, string val} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 30 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.t2 - name: default.t2 - Truncated Path -> Alias: - /t2 [t2] - Needs Tagging: false - Reduce Operator Tree: - Group By Operator - aggregations: count(VALUE._col0) - keys: KEY._col0 (type: string) - mode: partials - outputColumnNames: _col0, _col1 - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - GlobalTableId: 0 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - properties: - columns _col0,_col1 - columns.types string,bigint - escape.delim \ - serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - TotalFiles: 1 - GatherStats: false - MultiFileSpray: false - - Stage: Stage-2 - Map Reduce - Map Operator Tree: - TableScan - GatherStats: false - Reduce Output Operator - key expressions: _col0 (type: string) - sort order: + - Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - tag: -1 - value expressions: _col1 (type: bigint) - auto parallelism: false - Path -> Alias: -#### A masked pattern was here #### - Path -> Partition: -#### A masked pattern was here #### - Partition - base file name: -mr-10001 - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - properties: - columns _col0,_col1 - columns.types string,bigint - escape.delim \ - serialization.lib 
org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - properties: - columns _col0,_col1 - columns.types string,bigint - escape.delim \ - serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - Truncated Path -> Alias: -#### A masked pattern was here #### - Needs Tagging: false - Reduce Operator Tree: - Group By Operator - aggregations: count(VALUE._col0) - keys: KEY._col0 (type: string) - mode: final - outputColumnNames: _col0, _col1 - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: UDFToInteger(_col0) (type: int), UDFToInteger(_col1) (type: int) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - GlobalTableId: 1 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE -#### A masked pattern was here #### - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key,cnt - columns.comments - columns.types int:int -#### A masked pattern was here #### - name default.outputtbl1 - numFiles 1 - numRows 5 - rawDataSize 15 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 20 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 - TotalFiles: 1 - GatherStats: true - MultiFileSpray: false - - Stage: Stage-0 - Move Operator - tables: - replace: true -#### A masked pattern was here #### - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key,cnt - columns.comments - columns.types int:int -#### A masked pattern was here #### - name default.outputtbl1 - numFiles 1 - numRows 5 - rawDataSize 15 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 20 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 - - Stage: Stage-3 - Stats-Aggr Operator -#### A masked pattern was here #### - -PREHOOK: query: INSERT OVERWRITE TABLE outputTbl1 -SELECT key, count(1) FROM T2 GROUP BY key -PREHOOK: type: QUERY -PREHOOK: Input: default@t2 -PREHOOK: Output: default@outputtbl1 -POSTHOOK: query: INSERT OVERWRITE TABLE outputTbl1 -SELECT key, count(1) FROM T2 GROUP BY key -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t2 -POSTHOOK: Output: default@outputtbl1 -POSTHOOK: Lineage: outputtbl1.cnt EXPRESSION [(t2)t2.null, ] -POSTHOOK: Lineage: outputtbl1.key EXPRESSION [(t2)t2.FieldSchema(name:key, type:string, comment:null), ] -PREHOOK: query: SELECT * FROM outputTbl1 -PREHOOK: type: QUERY -PREHOOK: Input: default@outputtbl1 -#### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM outputTbl1 
-POSTHOOK: type: QUERY -POSTHOOK: Input: default@outputtbl1 -#### A masked pattern was here #### -1 1 -2 1 -3 1 -7 1 -8 2 -PREHOOK: query: -- The plan should be converted to a map-side group by if the group by key contains a constant in between the --- sorted keys -EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl4 -SELECT key, 1, val, count(1) FROM T2 GROUP BY key, 1, val -PREHOOK: type: QUERY -POSTHOOK: query: -- The plan should be converted to a map-side group by if the group by key contains a constant in between the --- sorted keys -EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl4 -SELECT key, 1, val, count(1) FROM T2 GROUP BY key, 1, val -POSTHOOK: type: QUERY -ABSTRACT SYNTAX TREE: - -TOK_QUERY - TOK_FROM - TOK_TABREF - TOK_TABNAME - T2 - TOK_INSERT - TOK_DESTINATION - TOK_TAB - TOK_TABNAME - outputTbl4 - TOK_SELECT - TOK_SELEXPR - TOK_TABLE_OR_COL - key - TOK_SELEXPR - 1 - TOK_SELEXPR - TOK_TABLE_OR_COL - val - TOK_SELEXPR - TOK_FUNCTION - count - 1 - TOK_GROUPBY - TOK_TABLE_OR_COL - key - 1 - TOK_TABLE_OR_COL - val - - -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5 - Stage-4 - Stage-0 depends on stages: Stage-4, Stage-3, Stage-6 - Stage-2 depends on stages: Stage-0 - Stage-3 - Stage-5 - Stage-6 depends on stages: Stage-5 - -STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: t2 - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - GatherStats: false - Select Operator - expressions: key (type: string), val (type: string) - outputColumnNames: key, val - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - Group By Operator - aggregations: count(1) - keys: key (type: string), 1 (type: int), val (type: string) - mode: final - outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: UDFToInteger(_col0) (type: int), _col1 (type: int), _col2 (type: string), UDFToInteger(_col3) (type: int) - outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - GlobalTableId: 1 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE -#### A masked pattern was here #### - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key1,key2,key3,cnt - columns.comments - columns.types int:int:string:int -#### A masked pattern was here #### - name default.outputtbl4 - numFiles 1 - numRows 6 - rawDataSize 48 - serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 54 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl4 - TotalFiles: 1 - GatherStats: true - MultiFileSpray: false - Path -> Alias: -#### A masked pattern was here #### - Path -> Partition: -#### A masked pattern was here #### - Partition - base file name: t2 - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true 
- SORTBUCKETCOLSPREFIX TRUE - bucket_count 2 - bucket_field_name key - columns key,val - columns.comments - columns.types string:string -#### A masked pattern was here #### - name default.t2 - numFiles 1 - numRows 6 - rawDataSize 24 - serialization.ddl struct t2 { string key, string val} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 30 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - SORTBUCKETCOLSPREFIX TRUE - bucket_count 2 - bucket_field_name key - columns key,val - columns.comments - columns.types string:string -#### A masked pattern was here #### - name default.t2 - numFiles 1 - numRows 6 - rawDataSize 24 - serialization.ddl struct t2 { string key, string val} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 30 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.t2 - name: default.t2 - Truncated Path -> Alias: - /t2 [t2] - - Stage: Stage-7 - Conditional Operator - - Stage: Stage-4 - Move Operator - files: - hdfs directory: true -#### A masked pattern was here #### - - Stage: Stage-0 - Move Operator - tables: - replace: true -#### A masked pattern was here #### - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key1,key2,key3,cnt - columns.comments - columns.types int:int:string:int -#### A masked pattern was here #### - name default.outputtbl4 - numFiles 1 - numRows 6 - rawDataSize 48 - serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 54 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl4 - - Stage: Stage-2 - Stats-Aggr Operator -#### A masked pattern was here #### - - Stage: Stage-3 - Map Reduce - Map Operator Tree: - TableScan - GatherStats: false - File Output Operator - compressed: false - GlobalTableId: 0 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key1,key2,key3,cnt - columns.comments - columns.types int:int:string:int -#### A masked pattern was here #### - name default.outputtbl4 - numFiles 1 - numRows 6 - rawDataSize 48 - serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 54 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl4 - TotalFiles: 1 - GatherStats: false - MultiFileSpray: false - Path -> Alias: -#### A masked pattern was here #### - Path -> Partition: -#### A masked pattern was here #### - Partition - base file name: -ext-10001 - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - 
properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key1,key2,key3,cnt - columns.comments - columns.types int:int:string:int -#### A masked pattern was here #### - name default.outputtbl4 - numFiles 1 - numRows 6 - rawDataSize 48 - serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 54 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key1,key2,key3,cnt - columns.comments - columns.types int:int:string:int -#### A masked pattern was here #### - name default.outputtbl4 - numFiles 1 - numRows 6 - rawDataSize 48 - serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 54 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl4 - name: default.outputtbl4 - Truncated Path -> Alias: -#### A masked pattern was here #### - - Stage: Stage-5 - Map Reduce - Map Operator Tree: - TableScan - GatherStats: false - File Output Operator - compressed: false - GlobalTableId: 0 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key1,key2,key3,cnt - columns.comments - columns.types int:int:string:int -#### A masked pattern was here #### - name default.outputtbl4 - numFiles 1 - numRows 6 - rawDataSize 48 - serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 54 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl4 - TotalFiles: 1 - GatherStats: false - MultiFileSpray: false - Path -> Alias: -#### A masked pattern was here #### - Path -> Partition: -#### A masked pattern was here #### - Partition - base file name: -ext-10001 - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key1,key2,key3,cnt - columns.comments - columns.types int:int:string:int -#### A masked pattern was here #### - name default.outputtbl4 - numFiles 1 - numRows 6 - rawDataSize 48 - serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 54 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key1,key2,key3,cnt - columns.comments - columns.types int:int:string:int -#### A masked pattern was here #### - name default.outputtbl4 - numFiles 1 - numRows 6 - rawDataSize 48 - serialization.ddl struct outputtbl4 
{ i32 key1, i32 key2, string key3, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 54 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl4 - name: default.outputtbl4 - Truncated Path -> Alias: -#### A masked pattern was here #### - - Stage: Stage-6 - Move Operator - files: - hdfs directory: true -#### A masked pattern was here #### - -PREHOOK: query: INSERT OVERWRITE TABLE outputTbl4 -SELECT key, 1, val, count(1) FROM T2 GROUP BY key, 1, val -PREHOOK: type: QUERY -PREHOOK: Input: default@t2 -PREHOOK: Output: default@outputtbl4 -POSTHOOK: query: INSERT OVERWRITE TABLE outputTbl4 -SELECT key, 1, val, count(1) FROM T2 GROUP BY key, 1, val -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t2 -POSTHOOK: Output: default@outputtbl4 -POSTHOOK: Lineage: outputtbl4.cnt EXPRESSION [(t2)t2.null, ] -POSTHOOK: Lineage: outputtbl4.key1 EXPRESSION [(t2)t2.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: outputtbl4.key2 SIMPLE [] -POSTHOOK: Lineage: outputtbl4.key3 SIMPLE [(t2)t2.FieldSchema(name:val, type:string, comment:null), ] -PREHOOK: query: SELECT * FROM outputTbl4 -PREHOOK: type: QUERY -PREHOOK: Input: default@outputtbl4 -#### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM outputTbl4 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@outputtbl4 -#### A masked pattern was here #### -1 1 11 1 -2 1 12 1 -3 1 13 1 -7 1 17 1 -8 1 18 1 -8 1 28 1 -PREHOOK: query: CREATE TABLE outputTbl5(key1 int, key2 int, key3 string, key4 int, cnt int) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -POSTHOOK: query: CREATE TABLE outputTbl5(key1 int, key2 int, key3 string, key4 int, cnt int) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@outputTbl5 -PREHOOK: query: -- The plan should be converted to a map-side group by if the group by key contains a constant in between the --- sorted keys followed by anything -EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl5 -SELECT key, 1, val, 2, count(1) FROM T2 GROUP BY key, 1, val, 2 -PREHOOK: type: QUERY -POSTHOOK: query: -- The plan should be converted to a map-side group by if the group by key contains a constant in between the --- sorted keys followed by anything -EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl5 -SELECT key, 1, val, 2, count(1) FROM T2 GROUP BY key, 1, val, 2 -POSTHOOK: type: QUERY -ABSTRACT SYNTAX TREE: - -TOK_QUERY - TOK_FROM - TOK_TABREF - TOK_TABNAME - T2 - TOK_INSERT - TOK_DESTINATION - TOK_TAB - TOK_TABNAME - outputTbl5 - TOK_SELECT - TOK_SELEXPR - TOK_TABLE_OR_COL - key - TOK_SELEXPR - 1 - TOK_SELEXPR - TOK_TABLE_OR_COL - val - TOK_SELEXPR - 2 - TOK_SELEXPR - TOK_FUNCTION - count - 1 - TOK_GROUPBY - TOK_TABLE_OR_COL - key - 1 - TOK_TABLE_OR_COL - val - 2 - - -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5 - Stage-4 - Stage-0 depends on stages: Stage-4, Stage-3, Stage-6 - Stage-2 depends on stages: Stage-0 - Stage-3 - Stage-5 - Stage-6 depends on stages: Stage-5 - -STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: t2 - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - GatherStats: false - Select Operator - expressions: key (type: string), val (type: string) - outputColumnNames: key, val - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - Group By 
Operator - aggregations: count(1) - keys: key (type: string), 1 (type: int), val (type: string), 2 (type: int) - mode: final - outputColumnNames: _col0, _col1, _col2, _col3, _col4 - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: UDFToInteger(_col0) (type: int), _col1 (type: int), _col2 (type: string), _col3 (type: int), UDFToInteger(_col4) (type: int) - outputColumnNames: _col0, _col1, _col2, _col3, _col4 - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - GlobalTableId: 1 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE -#### A masked pattern was here #### - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - bucket_count -1 - columns key1,key2,key3,key4,cnt - columns.comments - columns.types int:int:string:int:int -#### A masked pattern was here #### - name default.outputtbl5 - serialization.ddl struct outputtbl5 { i32 key1, i32 key2, string key3, i32 key4, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl5 - TotalFiles: 1 - GatherStats: true - MultiFileSpray: false - Path -> Alias: -#### A masked pattern was here #### - Path -> Partition: -#### A masked pattern was here #### - Partition - base file name: t2 - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - SORTBUCKETCOLSPREFIX TRUE - bucket_count 2 - bucket_field_name key - columns key,val - columns.comments - columns.types string:string -#### A masked pattern was here #### - name default.t2 - numFiles 1 - numRows 6 - rawDataSize 24 - serialization.ddl struct t2 { string key, string val} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 30 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - SORTBUCKETCOLSPREFIX TRUE - bucket_count 2 - bucket_field_name key - columns key,val - columns.comments - columns.types string:string -#### A masked pattern was here #### - name default.t2 - numFiles 1 - numRows 6 - rawDataSize 24 - serialization.ddl struct t2 { string key, string val} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 30 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.t2 - name: default.t2 - Truncated Path -> Alias: - /t2 [t2] - - Stage: Stage-7 - Conditional Operator - - Stage: Stage-4 - Move Operator - files: - hdfs directory: true -#### A masked pattern was here #### - - Stage: Stage-0 - Move Operator - tables: - replace: true -#### A masked pattern was here #### - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - bucket_count -1 - columns key1,key2,key3,key4,cnt - columns.comments - columns.types 
int:int:string:int:int -#### A masked pattern was here #### - name default.outputtbl5 - serialization.ddl struct outputtbl5 { i32 key1, i32 key2, string key3, i32 key4, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl5 - - Stage: Stage-2 - Stats-Aggr Operator -#### A masked pattern was here #### - - Stage: Stage-3 - Map Reduce - Map Operator Tree: - TableScan - GatherStats: false - File Output Operator - compressed: false - GlobalTableId: 0 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - bucket_count -1 - columns key1,key2,key3,key4,cnt - columns.comments - columns.types int:int:string:int:int -#### A masked pattern was here #### - name default.outputtbl5 - serialization.ddl struct outputtbl5 { i32 key1, i32 key2, string key3, i32 key4, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl5 - TotalFiles: 1 - GatherStats: false - MultiFileSpray: false - Path -> Alias: -#### A masked pattern was here #### - Path -> Partition: -#### A masked pattern was here #### - Partition - base file name: -ext-10001 - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - bucket_count -1 - columns key1,key2,key3,key4,cnt - columns.comments - columns.types int:int:string:int:int -#### A masked pattern was here #### - name default.outputtbl5 - serialization.ddl struct outputtbl5 { i32 key1, i32 key2, string key3, i32 key4, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - bucket_count -1 - columns key1,key2,key3,key4,cnt - columns.comments - columns.types int:int:string:int:int -#### A masked pattern was here #### - name default.outputtbl5 - serialization.ddl struct outputtbl5 { i32 key1, i32 key2, string key3, i32 key4, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl5 - name: default.outputtbl5 - Truncated Path -> Alias: -#### A masked pattern was here #### - - Stage: Stage-5 - Map Reduce - Map Operator Tree: - TableScan - GatherStats: false - File Output Operator - compressed: false - GlobalTableId: 0 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - bucket_count -1 - columns key1,key2,key3,key4,cnt - columns.comments - columns.types int:int:string:int:int -#### A masked pattern was here #### - name default.outputtbl5 - serialization.ddl struct outputtbl5 { i32 key1, i32 key2, string key3, i32 key4, i32 cnt} - serialization.format 1 - serialization.lib 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl5 - TotalFiles: 1 - GatherStats: false - MultiFileSpray: false - Path -> Alias: -#### A masked pattern was here #### - Path -> Partition: -#### A masked pattern was here #### - Partition - base file name: -ext-10001 - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - bucket_count -1 - columns key1,key2,key3,key4,cnt - columns.comments - columns.types int:int:string:int:int -#### A masked pattern was here #### - name default.outputtbl5 - serialization.ddl struct outputtbl5 { i32 key1, i32 key2, string key3, i32 key4, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - bucket_count -1 - columns key1,key2,key3,key4,cnt - columns.comments - columns.types int:int:string:int:int -#### A masked pattern was here #### - name default.outputtbl5 - serialization.ddl struct outputtbl5 { i32 key1, i32 key2, string key3, i32 key4, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl5 - name: default.outputtbl5 - Truncated Path -> Alias: -#### A masked pattern was here #### - - Stage: Stage-6 - Move Operator - files: - hdfs directory: true -#### A masked pattern was here #### - -PREHOOK: query: INSERT OVERWRITE TABLE outputTbl5 -SELECT key, 1, val, 2, count(1) FROM T2 GROUP BY key, 1, val, 2 -PREHOOK: type: QUERY -PREHOOK: Input: default@t2 -PREHOOK: Output: default@outputtbl5 -POSTHOOK: query: INSERT OVERWRITE TABLE outputTbl5 -SELECT key, 1, val, 2, count(1) FROM T2 GROUP BY key, 1, val, 2 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t2 -POSTHOOK: Output: default@outputtbl5 -POSTHOOK: Lineage: outputtbl5.cnt EXPRESSION [(t2)t2.null, ] -POSTHOOK: Lineage: outputtbl5.key1 EXPRESSION [(t2)t2.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: outputtbl5.key2 SIMPLE [] -POSTHOOK: Lineage: outputtbl5.key3 SIMPLE [(t2)t2.FieldSchema(name:val, type:string, comment:null), ] -POSTHOOK: Lineage: outputtbl5.key4 SIMPLE [] -PREHOOK: query: SELECT * FROM outputTbl5 -ORDER BY key1, key2, key3, key4 -PREHOOK: type: QUERY -PREHOOK: Input: default@outputtbl5 -#### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM outputTbl5 -ORDER BY key1, key2, key3, key4 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@outputtbl5 -#### A masked pattern was here #### -1 1 11 2 1 -2 1 12 2 1 -3 1 13 2 1 -7 1 17 2 1 -8 1 18 2 1 -8 1 28 2 1 -PREHOOK: query: -- contants from sub-queries should work fine -EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl4 -SELECT key, constant, val, count(1) from -(SELECT key, 1 as constant, val from T2)subq -group by key, constant, val -PREHOOK: type: QUERY -POSTHOOK: query: -- contants from sub-queries should work fine -EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl4 -SELECT key, constant, val, count(1) from -(SELECT key, 1 as constant, val from T2)subq -group by key, constant, val -POSTHOOK: type: QUERY -ABSTRACT SYNTAX TREE: - 
-TOK_QUERY - TOK_FROM - TOK_SUBQUERY - TOK_QUERY - TOK_FROM - TOK_TABREF - TOK_TABNAME - T2 - TOK_INSERT - TOK_DESTINATION - TOK_DIR - TOK_TMP_FILE - TOK_SELECT - TOK_SELEXPR - TOK_TABLE_OR_COL - key - TOK_SELEXPR - 1 - constant - TOK_SELEXPR - TOK_TABLE_OR_COL - val - subq - TOK_INSERT - TOK_DESTINATION - TOK_TAB - TOK_TABNAME - outputTbl4 - TOK_SELECT - TOK_SELEXPR - TOK_TABLE_OR_COL - key - TOK_SELEXPR - TOK_TABLE_OR_COL - constant - TOK_SELEXPR - TOK_TABLE_OR_COL - val - TOK_SELEXPR - TOK_FUNCTION - count - 1 - TOK_GROUPBY - TOK_TABLE_OR_COL - key - TOK_TABLE_OR_COL - constant - TOK_TABLE_OR_COL - val - - -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5 - Stage-4 - Stage-0 depends on stages: Stage-4, Stage-3, Stage-6 - Stage-2 depends on stages: Stage-0 - Stage-3 - Stage-5 - Stage-6 depends on stages: Stage-5 - -STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: t2 - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - GatherStats: false - Select Operator - expressions: key (type: string), 1 (type: int), val (type: string) - outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - Group By Operator - aggregations: count(1) - keys: _col0 (type: string), _col1 (type: int), _col2 (type: string) - mode: final - outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: UDFToInteger(_col0) (type: int), _col1 (type: int), _col2 (type: string), UDFToInteger(_col3) (type: int) - outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - GlobalTableId: 1 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE -#### A masked pattern was here #### - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key1,key2,key3,cnt - columns.comments - columns.types int:int:string:int -#### A masked pattern was here #### - name default.outputtbl4 - numFiles 1 - numRows 6 - rawDataSize 48 - serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 54 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl4 - TotalFiles: 1 - GatherStats: true - MultiFileSpray: false - Path -> Alias: -#### A masked pattern was here #### - Path -> Partition: -#### A masked pattern was here #### - Partition - base file name: t2 - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - SORTBUCKETCOLSPREFIX TRUE - bucket_count 2 - bucket_field_name key - columns key,val - columns.comments - columns.types string:string -#### A masked pattern was here #### - name default.t2 - numFiles 1 - numRows 6 - rawDataSize 24 - serialization.ddl struct t2 { string key, string val} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - 
totalSize 30 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - SORTBUCKETCOLSPREFIX TRUE - bucket_count 2 - bucket_field_name key - columns key,val - columns.comments - columns.types string:string -#### A masked pattern was here #### - name default.t2 - numFiles 1 - numRows 6 - rawDataSize 24 - serialization.ddl struct t2 { string key, string val} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 30 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.t2 - name: default.t2 - Truncated Path -> Alias: - /t2 [subq:t2] - - Stage: Stage-7 - Conditional Operator - - Stage: Stage-4 - Move Operator - files: - hdfs directory: true -#### A masked pattern was here #### - - Stage: Stage-0 - Move Operator - tables: - replace: true -#### A masked pattern was here #### - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key1,key2,key3,cnt - columns.comments - columns.types int:int:string:int -#### A masked pattern was here #### - name default.outputtbl4 - numFiles 1 - numRows 6 - rawDataSize 48 - serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 54 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl4 - - Stage: Stage-2 - Stats-Aggr Operator -#### A masked pattern was here #### - - Stage: Stage-3 - Map Reduce - Map Operator Tree: - TableScan - GatherStats: false - File Output Operator - compressed: false - GlobalTableId: 0 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key1,key2,key3,cnt - columns.comments - columns.types int:int:string:int -#### A masked pattern was here #### - name default.outputtbl4 - numFiles 1 - numRows 6 - rawDataSize 48 - serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 54 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl4 - TotalFiles: 1 - GatherStats: false - MultiFileSpray: false - Path -> Alias: -#### A masked pattern was here #### - Path -> Partition: -#### A masked pattern was here #### - Partition - base file name: -ext-10001 - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key1,key2,key3,cnt - columns.comments - columns.types int:int:string:int -#### A masked pattern was here #### - name default.outputtbl4 - numFiles 1 - numRows 6 - rawDataSize 48 - serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt} - serialization.format 1 - serialization.lib 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 54 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key1,key2,key3,cnt - columns.comments - columns.types int:int:string:int -#### A masked pattern was here #### - name default.outputtbl4 - numFiles 1 - numRows 6 - rawDataSize 48 - serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 54 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl4 - name: default.outputtbl4 - Truncated Path -> Alias: -#### A masked pattern was here #### - - Stage: Stage-5 - Map Reduce - Map Operator Tree: - TableScan - GatherStats: false - File Output Operator - compressed: false - GlobalTableId: 0 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key1,key2,key3,cnt - columns.comments - columns.types int:int:string:int -#### A masked pattern was here #### - name default.outputtbl4 - numFiles 1 - numRows 6 - rawDataSize 48 - serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 54 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl4 - TotalFiles: 1 - GatherStats: false - MultiFileSpray: false - Path -> Alias: -#### A masked pattern was here #### - Path -> Partition: -#### A masked pattern was here #### - Partition - base file name: -ext-10001 - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key1,key2,key3,cnt - columns.comments - columns.types int:int:string:int -#### A masked pattern was here #### - name default.outputtbl4 - numFiles 1 - numRows 6 - rawDataSize 48 - serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 54 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key1,key2,key3,cnt - columns.comments - columns.types int:int:string:int -#### A masked pattern was here #### - name default.outputtbl4 - numFiles 1 - numRows 6 - rawDataSize 48 - serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 54 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl4 - name: default.outputtbl4 - Truncated Path -> Alias: -#### A masked pattern was here #### - - 
Stage: Stage-6 - Move Operator - files: - hdfs directory: true -#### A masked pattern was here #### - -PREHOOK: query: INSERT OVERWRITE TABLE outputTbl4 -SELECT key, constant, val, count(1) from -(SELECT key, 1 as constant, val from T2)subq -group by key, constant, val -PREHOOK: type: QUERY -PREHOOK: Input: default@t2 -PREHOOK: Output: default@outputtbl4 -POSTHOOK: query: INSERT OVERWRITE TABLE outputTbl4 -SELECT key, constant, val, count(1) from -(SELECT key, 1 as constant, val from T2)subq -group by key, constant, val -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t2 -POSTHOOK: Output: default@outputtbl4 -POSTHOOK: Lineage: outputtbl4.cnt EXPRESSION [(t2)t2.null, ] -POSTHOOK: Lineage: outputtbl4.key1 EXPRESSION [(t2)t2.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: outputtbl4.key2 SIMPLE [] -POSTHOOK: Lineage: outputtbl4.key3 SIMPLE [(t2)t2.FieldSchema(name:val, type:string, comment:null), ] -PREHOOK: query: SELECT * FROM outputTbl4 -PREHOOK: type: QUERY -PREHOOK: Input: default@outputtbl4 -#### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM outputTbl4 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@outputtbl4 -#### A masked pattern was here #### -1 1 11 1 -2 1 12 1 -3 1 13 1 -7 1 17 1 -8 1 18 1 -8 1 28 1 -PREHOOK: query: -- multiple levels of contants from sub-queries should work fine -EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl4 -select key, constant3, val, count(1) from -( -SELECT key, constant as constant2, val, 2 as constant3 from -(SELECT key, 1 as constant, val from T2)subq -)subq2 -group by key, constant3, val -PREHOOK: type: QUERY -POSTHOOK: query: -- multiple levels of contants from sub-queries should work fine -EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl4 -select key, constant3, val, count(1) from -( -SELECT key, constant as constant2, val, 2 as constant3 from -(SELECT key, 1 as constant, val from T2)subq -)subq2 -group by key, constant3, val -POSTHOOK: type: QUERY -ABSTRACT SYNTAX TREE: - -TOK_QUERY - TOK_FROM - TOK_SUBQUERY - TOK_QUERY - TOK_FROM - TOK_SUBQUERY - TOK_QUERY - TOK_FROM - TOK_TABREF - TOK_TABNAME - T2 - TOK_INSERT - TOK_DESTINATION - TOK_DIR - TOK_TMP_FILE - TOK_SELECT - TOK_SELEXPR - TOK_TABLE_OR_COL - key - TOK_SELEXPR - 1 - constant - TOK_SELEXPR - TOK_TABLE_OR_COL - val - subq - TOK_INSERT - TOK_DESTINATION - TOK_DIR - TOK_TMP_FILE - TOK_SELECT - TOK_SELEXPR - TOK_TABLE_OR_COL - key - TOK_SELEXPR - TOK_TABLE_OR_COL - constant - constant2 - TOK_SELEXPR - TOK_TABLE_OR_COL - val - TOK_SELEXPR - 2 - constant3 - subq2 - TOK_INSERT - TOK_DESTINATION - TOK_TAB - TOK_TABNAME - outputTbl4 - TOK_SELECT - TOK_SELEXPR - TOK_TABLE_OR_COL - key - TOK_SELEXPR - TOK_TABLE_OR_COL - constant3 - TOK_SELEXPR - TOK_TABLE_OR_COL - val - TOK_SELEXPR - TOK_FUNCTION - count - 1 - TOK_GROUPBY - TOK_TABLE_OR_COL - key - TOK_TABLE_OR_COL - constant3 - TOK_TABLE_OR_COL - val - - -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5 - Stage-4 - Stage-0 depends on stages: Stage-4, Stage-3, Stage-6 - Stage-2 depends on stages: Stage-0 - Stage-3 - Stage-5 - Stage-6 depends on stages: Stage-5 - -STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: t2 - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - GatherStats: false - Select Operator - expressions: key (type: string), 2 (type: int), val (type: string) - outputColumnNames: _col0, _col3, _col2 - Statistics: Num rows: 6 Data size: 24 Basic stats: 
COMPLETE Column stats: NONE - Group By Operator - aggregations: count(1) - keys: _col0 (type: string), _col3 (type: int), _col2 (type: string) - mode: final - outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: UDFToInteger(_col0) (type: int), _col1 (type: int), _col2 (type: string), UDFToInteger(_col3) (type: int) - outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - GlobalTableId: 1 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE -#### A masked pattern was here #### - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key1,key2,key3,cnt - columns.comments - columns.types int:int:string:int -#### A masked pattern was here #### - name default.outputtbl4 - numFiles 1 - numRows 6 - rawDataSize 48 - serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 54 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl4 - TotalFiles: 1 - GatherStats: true - MultiFileSpray: false - Path -> Alias: -#### A masked pattern was here #### - Path -> Partition: -#### A masked pattern was here #### - Partition - base file name: t2 - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - SORTBUCKETCOLSPREFIX TRUE - bucket_count 2 - bucket_field_name key - columns key,val - columns.comments - columns.types string:string -#### A masked pattern was here #### - name default.t2 - numFiles 1 - numRows 6 - rawDataSize 24 - serialization.ddl struct t2 { string key, string val} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 30 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - SORTBUCKETCOLSPREFIX TRUE - bucket_count 2 - bucket_field_name key - columns key,val - columns.comments - columns.types string:string -#### A masked pattern was here #### - name default.t2 - numFiles 1 - numRows 6 - rawDataSize 24 - serialization.ddl struct t2 { string key, string val} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 30 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.t2 - name: default.t2 - Truncated Path -> Alias: - /t2 [subq2:subq:t2] - - Stage: Stage-7 - Conditional Operator - - Stage: Stage-4 - Move Operator - files: - hdfs directory: true -#### A masked pattern was here #### - - Stage: Stage-0 - Move Operator - tables: - replace: true -#### A masked pattern was here #### - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - 
COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key1,key2,key3,cnt - columns.comments - columns.types int:int:string:int -#### A masked pattern was here #### - name default.outputtbl4 - numFiles 1 - numRows 6 - rawDataSize 48 - serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 54 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl4 - - Stage: Stage-2 - Stats-Aggr Operator -#### A masked pattern was here #### - - Stage: Stage-3 - Map Reduce - Map Operator Tree: - TableScan - GatherStats: false - File Output Operator - compressed: false - GlobalTableId: 0 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key1,key2,key3,cnt - columns.comments - columns.types int:int:string:int -#### A masked pattern was here #### - name default.outputtbl4 - numFiles 1 - numRows 6 - rawDataSize 48 - serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 54 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl4 - TotalFiles: 1 - GatherStats: false - MultiFileSpray: false - Path -> Alias: -#### A masked pattern was here #### - Path -> Partition: -#### A masked pattern was here #### - Partition - base file name: -ext-10001 - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key1,key2,key3,cnt - columns.comments - columns.types int:int:string:int -#### A masked pattern was here #### - name default.outputtbl4 - numFiles 1 - numRows 6 - rawDataSize 48 - serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 54 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key1,key2,key3,cnt - columns.comments - columns.types int:int:string:int -#### A masked pattern was here #### - name default.outputtbl4 - numFiles 1 - numRows 6 - rawDataSize 48 - serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 54 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl4 - name: default.outputtbl4 - Truncated Path -> Alias: -#### A masked pattern was here #### - - Stage: Stage-5 - Map Reduce - Map Operator Tree: - TableScan - GatherStats: false - File Output Operator - compressed: false - GlobalTableId: 0 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key1,key2,key3,cnt - columns.comments - columns.types int:int:string:int -#### A masked pattern was here #### - name default.outputtbl4 - numFiles 1 - numRows 6 - rawDataSize 48 - serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 54 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl4 - TotalFiles: 1 - GatherStats: false - MultiFileSpray: false - Path -> Alias: -#### A masked pattern was here #### - Path -> Partition: -#### A masked pattern was here #### - Partition - base file name: -ext-10001 - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key1,key2,key3,cnt - columns.comments - columns.types int:int:string:int -#### A masked pattern was here #### - name default.outputtbl4 - numFiles 1 - numRows 6 - rawDataSize 48 - serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 54 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key1,key2,key3,cnt - columns.comments - columns.types int:int:string:int -#### A masked pattern was here #### - name default.outputtbl4 - numFiles 1 - numRows 6 - rawDataSize 48 - serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 54 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl4 - name: default.outputtbl4 - Truncated Path -> Alias: -#### A masked pattern was here #### - - Stage: Stage-6 - Move Operator - files: - hdfs directory: true -#### A masked pattern was here #### - -PREHOOK: query: INSERT OVERWRITE TABLE outputTbl4 -select key, constant3, val, count(1) from -( -SELECT key, constant as constant2, val, 2 as constant3 from -(SELECT key, 1 as constant, val from T2)subq -)subq2 -group by key, constant3, val -PREHOOK: type: QUERY -PREHOOK: Input: default@t2 -PREHOOK: Output: default@outputtbl4 -POSTHOOK: query: INSERT OVERWRITE TABLE outputTbl4 -select key, constant3, val, count(1) from -( -SELECT key, constant as constant2, val, 2 as constant3 from -(SELECT key, 1 as constant, val from T2)subq -)subq2 -group by key, constant3, val -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t2 -POSTHOOK: Output: default@outputtbl4 -POSTHOOK: Lineage: outputtbl4.cnt EXPRESSION [(t2)t2.null, ] -POSTHOOK: Lineage: outputtbl4.key1 EXPRESSION [(t2)t2.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: outputtbl4.key2 SIMPLE [] -POSTHOOK: Lineage: outputtbl4.key3 SIMPLE [(t2)t2.FieldSchema(name:val, type:string, comment:null), ] -PREHOOK: query: SELECT * FROM outputTbl4 -PREHOOK: type: QUERY -PREHOOK: Input: default@outputtbl4 -#### A masked pattern was here #### -POSTHOOK: query: SELECT * 
FROM outputTbl4 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@outputtbl4 -#### A masked pattern was here #### -1 2 11 1 -2 2 12 1 -3 2 13 1 -7 2 17 1 -8 2 18 1 -8 2 28 1 -PREHOOK: query: CREATE TABLE DEST1(key INT, cnt INT) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -POSTHOOK: query: CREATE TABLE DEST1(key INT, cnt INT) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@DEST1 -PREHOOK: query: CREATE TABLE DEST2(key INT, val STRING, cnt INT) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -POSTHOOK: query: CREATE TABLE DEST2(key INT, val STRING, cnt INT) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@DEST2 -PREHOOK: query: EXPLAIN -FROM T2 -INSERT OVERWRITE TABLE DEST1 SELECT key, count(1) GROUP BY key -INSERT OVERWRITE TABLE DEST2 SELECT key, val, count(1) GROUP BY key, val -PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN -FROM T2 -INSERT OVERWRITE TABLE DEST1 SELECT key, count(1) GROUP BY key -INSERT OVERWRITE TABLE DEST2 SELECT key, val, count(1) GROUP BY key, val -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-2 is a root stage - Stage-3 depends on stages: Stage-2 - Stage-0 depends on stages: Stage-3 - Stage-4 depends on stages: Stage-0 - Stage-1 depends on stages: Stage-3 - Stage-5 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-2 - Map Reduce - Map Operator Tree: - TableScan - alias: t2 - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: key (type: string) - outputColumnNames: key - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - Group By Operator - aggregations: count(1) - bucketGroup: true - keys: key (type: string) - mode: hash - outputColumnNames: _col0, _col1 - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: _col0 (type: string) - sort order: + - Map-reduce partition columns: rand() (type: double) - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - value expressions: _col1 (type: bigint) - Select Operator - expressions: key (type: string), val (type: string) - outputColumnNames: key, val - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - Group By Operator - aggregations: count(1) - keys: key (type: string), val (type: string) - mode: final - outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: UDFToInteger(_col0) (type: int), _col1 (type: string), UDFToInteger(_col2) (type: int) - outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: true - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest2 - Reduce Operator Tree: - Group By Operator - aggregations: count(VALUE._col0) - keys: KEY._col0 (type: string) - mode: partials - outputColumnNames: _col0, _col1 - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: true - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - - Stage: Stage-3 - Map Reduce - Map Operator Tree: - TableScan - Reduce Output Operator - key expressions: _col0 (type: string) - sort order: + - Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - value expressions: _col1 (type: bigint) - Reduce Operator Tree: - Group By Operator - aggregations: count(VALUE._col0) - keys: KEY._col0 (type: string) - mode: final - outputColumnNames: _col0, _col1 - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: UDFToInteger(_col0) (type: int), UDFToInteger(_col1) (type: int) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: true - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 - - Stage: Stage-0 - Move Operator - tables: - replace: true - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 - - Stage: Stage-4 - Stats-Aggr Operator - - Stage: Stage-1 - Move Operator - tables: - replace: true - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest2 - - Stage: Stage-5 - Stats-Aggr Operator - -PREHOOK: query: FROM T2 -INSERT OVERWRITE TABLE DEST1 SELECT key, count(1) GROUP BY key -INSERT OVERWRITE TABLE DEST2 SELECT key, val, count(1) GROUP BY key, val -PREHOOK: type: QUERY -PREHOOK: Input: default@t2 -PREHOOK: Output: default@dest1 -PREHOOK: Output: default@dest2 -POSTHOOK: query: FROM T2 -INSERT OVERWRITE TABLE DEST1 SELECT key, count(1) GROUP BY key -INSERT OVERWRITE TABLE DEST2 SELECT key, val, count(1) GROUP BY key, val -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t2 -POSTHOOK: Output: default@dest1 -POSTHOOK: Output: default@dest2 -POSTHOOK: Lineage: dest1.cnt EXPRESSION [(t2)t2.null, ] -POSTHOOK: Lineage: dest1.key EXPRESSION [(t2)t2.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: dest2.cnt EXPRESSION [(t2)t2.null, ] -POSTHOOK: Lineage: dest2.key EXPRESSION [(t2)t2.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: dest2.val SIMPLE [(t2)t2.FieldSchema(name:val, type:string, comment:null), ] -PREHOOK: query: select * from DEST1 -PREHOOK: type: QUERY -PREHOOK: Input: default@dest1 -#### A masked pattern was here #### -POSTHOOK: query: select * from DEST1 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest1 -#### A masked pattern was here #### -1 1 -2 1 -3 1 -7 1 -8 2 -PREHOOK: query: select * from DEST2 -PREHOOK: type: QUERY -PREHOOK: Input: default@dest2 -#### A masked pattern was here #### -POSTHOOK: query: select * from DEST2 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest2 -#### A masked pattern was here #### -1 11 1 -2 12 1 -3 13 1 -7 17 1 -8 18 1 -8 28 1 -PREHOOK: query: -- multi-table insert with a sub-query -EXPLAIN -FROM (select key, val from 
T2 where key = 8) x -INSERT OVERWRITE TABLE DEST1 SELECT key, count(1) GROUP BY key -INSERT OVERWRITE TABLE DEST2 SELECT key, val, count(1) GROUP BY key, val -PREHOOK: type: QUERY -POSTHOOK: query: -- multi-table insert with a sub-query -EXPLAIN -FROM (select key, val from T2 where key = 8) x -INSERT OVERWRITE TABLE DEST1 SELECT key, count(1) GROUP BY key -INSERT OVERWRITE TABLE DEST2 SELECT key, val, count(1) GROUP BY key, val -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-2 is a root stage - Stage-3 depends on stages: Stage-2 - Stage-0 depends on stages: Stage-3 - Stage-4 depends on stages: Stage-0 - Stage-1 depends on stages: Stage-3 - Stage-5 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-2 - Map Reduce - Map Operator Tree: - TableScan - alias: t2 - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - Filter Operator - predicate: (key = 8) (type: boolean) - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: val (type: string) - outputColumnNames: _col1 - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: '8' (type: string) - outputColumnNames: _col0 - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE - Group By Operator - aggregations: count(1) - bucketGroup: true - keys: _col0 (type: string) - mode: hash - outputColumnNames: _col0, _col1 - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: _col0 (type: string) - sort order: + - Map-reduce partition columns: rand() (type: double) - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE - value expressions: _col1 (type: bigint) - Select Operator - expressions: '8' (type: string), _col1 (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE - Group By Operator - aggregations: count(1) - keys: _col0 (type: string), _col1 (type: string) - mode: final - outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: UDFToInteger(_col0) (type: int), _col1 (type: string), UDFToInteger(_col2) (type: int) - outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: true - Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest2 - Reduce Operator Tree: - Group By Operator - aggregations: count(VALUE._col0) - keys: KEY._col0 (type: string) - mode: partials - outputColumnNames: _col0, _col1 - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: true - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - - Stage: Stage-3 - Map Reduce - Map Operator Tree: - TableScan - Reduce Output Operator - key expressions: _col0 (type: string) - sort order: + - Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 3 Data size: 12 Basic 
stats: COMPLETE Column stats: NONE - value expressions: _col1 (type: bigint) - Reduce Operator Tree: - Group By Operator - aggregations: count(VALUE._col0) - keys: KEY._col0 (type: string) - mode: final - outputColumnNames: _col0, _col1 - Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: UDFToInteger(_col0) (type: int), UDFToInteger(_col1) (type: int) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: true - Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 - - Stage: Stage-0 - Move Operator - tables: - replace: true - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 - - Stage: Stage-4 - Stats-Aggr Operator - - Stage: Stage-1 - Move Operator - tables: - replace: true - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest2 - - Stage: Stage-5 - Stats-Aggr Operator - -PREHOOK: query: FROM (select key, val from T2 where key = 8) x -INSERT OVERWRITE TABLE DEST1 SELECT key, count(1) GROUP BY key -INSERT OVERWRITE TABLE DEST2 SELECT key, val, count(1) GROUP BY key, val -PREHOOK: type: QUERY -PREHOOK: Input: default@t2 -PREHOOK: Output: default@dest1 -PREHOOK: Output: default@dest2 -POSTHOOK: query: FROM (select key, val from T2 where key = 8) x -INSERT OVERWRITE TABLE DEST1 SELECT key, count(1) GROUP BY key -INSERT OVERWRITE TABLE DEST2 SELECT key, val, count(1) GROUP BY key, val -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t2 -POSTHOOK: Output: default@dest1 -POSTHOOK: Output: default@dest2 -POSTHOOK: Lineage: dest1.cnt EXPRESSION [(t2)t2.null, ] -POSTHOOK: Lineage: dest1.key EXPRESSION [(t2)t2.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: dest2.cnt EXPRESSION [(t2)t2.null, ] -POSTHOOK: Lineage: dest2.key EXPRESSION [(t2)t2.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: dest2.val SIMPLE [(t2)t2.FieldSchema(name:val, type:string, comment:null), ] -PREHOOK: query: select * from DEST1 -PREHOOK: type: QUERY -PREHOOK: Input: default@dest1 -#### A masked pattern was here #### -POSTHOOK: query: select * from DEST1 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest1 -#### A masked pattern was here #### -8 2 -PREHOOK: query: select * from DEST2 -PREHOOK: type: QUERY -PREHOOK: Input: default@dest2 -#### A masked pattern was here #### -POSTHOOK: query: select * from DEST2 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest2 -#### A masked pattern was here #### -8 18 1 -8 28 1 diff --git ql/src/test/results/clientpositive/input12_hadoop20.q.out ql/src/test/results/clientpositive/input12_hadoop20.q.out deleted file mode 100644 index e280c81757..0000000000 --- ql/src/test/results/clientpositive/input12_hadoop20.q.out +++ /dev/null @@ -1,822 +0,0 @@ -PREHOOK: query: -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.20, 0.20S) - -CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE -PREHOOK: type: CREATETABLE -PREHOOK: 
Output: database:default -POSTHOOK: query: -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.20, 0.20S) - -CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@dest1 -PREHOOK: query: CREATE TABLE dest2(key INT, value STRING) STORED AS TEXTFILE -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -POSTHOOK: query: CREATE TABLE dest2(key INT, value STRING) STORED AS TEXTFILE -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@dest2 -PREHOOK: query: CREATE TABLE dest3(key INT) PARTITIONED BY(ds STRING, hr STRING) STORED AS TEXTFILE -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -POSTHOOK: query: CREATE TABLE dest3(key INT) PARTITIONED BY(ds STRING, hr STRING) STORED AS TEXTFILE -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@dest3 -PREHOOK: query: EXPLAIN -FROM src -INSERT OVERWRITE TABLE dest1 SELECT src.* WHERE src.key < 100 -INSERT OVERWRITE TABLE dest2 SELECT src.key, src.value WHERE src.key >= 100 and src.key < 200 -INSERT OVERWRITE TABLE dest3 PARTITION(ds='2008-04-08', hr='12') SELECT src.key WHERE src.key >= 200 -PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN -FROM src -INSERT OVERWRITE TABLE dest1 SELECT src.* WHERE src.key < 100 -INSERT OVERWRITE TABLE dest2 SELECT src.key, src.value WHERE src.key >= 100 and src.key < 200 -INSERT OVERWRITE TABLE dest3 PARTITION(ds='2008-04-08', hr='12') SELECT src.key WHERE src.key >= 200 -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-3 is a root stage - Stage-9 depends on stages: Stage-3 , consists of Stage-6, Stage-5, Stage-7 - Stage-6 - Stage-0 depends on stages: Stage-6, Stage-5, Stage-8 - Stage-4 depends on stages: Stage-0 - Stage-5 - Stage-7 - Stage-8 depends on stages: Stage-7 - Stage-15 depends on stages: Stage-3 , consists of Stage-12, Stage-11, Stage-13 - Stage-12 - Stage-1 depends on stages: Stage-12, Stage-11, Stage-14 - Stage-10 depends on stages: Stage-1 - Stage-11 - Stage-13 - Stage-14 depends on stages: Stage-13 - Stage-21 depends on stages: Stage-3 , consists of Stage-18, Stage-17, Stage-19 - Stage-18 - Stage-2 depends on stages: Stage-18, Stage-17, Stage-20 - Stage-16 depends on stages: Stage-2 - Stage-17 - Stage-19 - Stage-20 depends on stages: Stage-19 - -STAGE PLANS: - Stage: Stage-3 - Map Reduce - Map Operator Tree: - TableScan - alias: src - Statistics: Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE - Filter Operator - predicate: (key < 100) (type: boolean) - Statistics: Num rows: 9 Data size: 1803 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: UDFToInteger(key) (type: int), value (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 9 Data size: 1803 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - Statistics: Num rows: 9 Data size: 1803 Basic stats: COMPLETE Column stats: NONE - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 - Filter Operator - predicate: ((key >= 100) and (key < 200)) (type: boolean) - Statistics: Num rows: 3 Data size: 601 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: UDFToInteger(key) (type: int), value (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 3 Data size: 601 Basic stats: 
COMPLETE Column stats: NONE - File Output Operator - compressed: false - Statistics: Num rows: 3 Data size: 601 Basic stats: COMPLETE Column stats: NONE - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest2 - Filter Operator - predicate: (key >= 200) (type: boolean) - Statistics: Num rows: 9 Data size: 1803 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: UDFToInteger(key) (type: int) - outputColumnNames: _col0 - Statistics: Num rows: 9 Data size: 1803 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - Statistics: Num rows: 9 Data size: 1803 Basic stats: COMPLETE Column stats: NONE - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest3 - - Stage: Stage-9 - Conditional Operator - - Stage: Stage-6 - Move Operator - files: - hdfs directory: true -#### A masked pattern was here #### - - Stage: Stage-0 - Move Operator - tables: - replace: true - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 - - Stage: Stage-4 - Stats-Aggr Operator - - Stage: Stage-5 - Map Reduce - Map Operator Tree: - TableScan - File Output Operator - compressed: false - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 - - Stage: Stage-7 - Map Reduce - Map Operator Tree: - TableScan - File Output Operator - compressed: false - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 - - Stage: Stage-8 - Move Operator - files: - hdfs directory: true -#### A masked pattern was here #### - - Stage: Stage-15 - Conditional Operator - - Stage: Stage-12 - Move Operator - files: - hdfs directory: true -#### A masked pattern was here #### - - Stage: Stage-1 - Move Operator - tables: - replace: true - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest2 - - Stage: Stage-10 - Stats-Aggr Operator - - Stage: Stage-11 - Map Reduce - Map Operator Tree: - TableScan - File Output Operator - compressed: false - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest2 - - Stage: Stage-13 - Map Reduce - Map Operator Tree: - TableScan - File Output Operator - compressed: false - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest2 - - Stage: Stage-14 - Move Operator - files: - hdfs directory: true -#### A masked pattern was here #### - - Stage: Stage-21 - 
Conditional Operator - - Stage: Stage-18 - Move Operator - files: - hdfs directory: true -#### A masked pattern was here #### - - Stage: Stage-2 - Move Operator - tables: - partition: - ds 2008-04-08 - hr 12 - replace: true - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest3 - - Stage: Stage-16 - Stats-Aggr Operator - - Stage: Stage-17 - Map Reduce - Map Operator Tree: - TableScan - File Output Operator - compressed: false - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest3 - - Stage: Stage-19 - Map Reduce - Map Operator Tree: - TableScan - File Output Operator - compressed: false - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest3 - - Stage: Stage-20 - Move Operator - files: - hdfs directory: true -#### A masked pattern was here #### - -PREHOOK: query: FROM src -INSERT OVERWRITE TABLE dest1 SELECT src.* WHERE src.key < 100 -INSERT OVERWRITE TABLE dest2 SELECT src.key, src.value WHERE src.key >= 100 and src.key < 200 -INSERT OVERWRITE TABLE dest3 PARTITION(ds='2008-04-08', hr='12') SELECT src.key WHERE src.key >= 200 -PREHOOK: type: QUERY -PREHOOK: Input: default@src -PREHOOK: Output: default@dest1 -PREHOOK: Output: default@dest2 -PREHOOK: Output: default@dest3@ds=2008-04-08/hr=12 -POSTHOOK: query: FROM src -INSERT OVERWRITE TABLE dest1 SELECT src.* WHERE src.key < 100 -INSERT OVERWRITE TABLE dest2 SELECT src.key, src.value WHERE src.key >= 100 and src.key < 200 -INSERT OVERWRITE TABLE dest3 PARTITION(ds='2008-04-08', hr='12') SELECT src.key WHERE src.key >= 200 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src -POSTHOOK: Output: default@dest1 -POSTHOOK: Output: default@dest2 -POSTHOOK: Output: default@dest3@ds=2008-04-08/hr=12 -POSTHOOK: Lineage: dest1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest2.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: dest2.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest3 PARTITION(ds=2008-04-08,hr=12).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -PREHOOK: query: SELECT dest1.* FROM dest1 -PREHOOK: type: QUERY -PREHOOK: Input: default@dest1 -#### A masked pattern was here #### -POSTHOOK: query: SELECT dest1.* FROM dest1 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest1 -#### A masked pattern was here #### -86 val_86 -27 val_27 -98 val_98 -66 val_66 -37 val_37 -15 val_15 -82 val_82 -17 val_17 -0 val_0 -57 val_57 -20 val_20 -92 val_92 -47 val_47 -72 val_72 -4 val_4 -35 val_35 -54 val_54 -51 val_51 -65 val_65 -83 val_83 -12 val_12 -67 val_67 -84 val_84 -58 val_58 -8 val_8 -24 val_24 -42 val_42 -0 val_0 -96 val_96 -26 val_26 -51 val_51 -43 val_43 -95 val_95 -98 val_98 -85 val_85 -77 val_77 -0 val_0 -87 val_87 -15 val_15 -72 val_72 -90 val_90 -19 val_19 -10 val_10 -5 val_5 -58 val_58 -35 val_35 -95 val_95 -11 val_11 -34 val_34 -42 val_42 -78 val_78 
-76 val_76 -41 val_41 -30 val_30 -64 val_64 -76 val_76 -74 val_74 -69 val_69 -33 val_33 -70 val_70 -5 val_5 -2 val_2 -35 val_35 -80 val_80 -44 val_44 -53 val_53 -90 val_90 -12 val_12 -5 val_5 -70 val_70 -24 val_24 -70 val_70 -83 val_83 -26 val_26 -67 val_67 -18 val_18 -9 val_9 -18 val_18 -97 val_97 -84 val_84 -28 val_28 -37 val_37 -90 val_90 -97 val_97 -PREHOOK: query: SELECT dest2.* FROM dest2 -PREHOOK: type: QUERY -PREHOOK: Input: default@dest2 -#### A masked pattern was here #### -POSTHOOK: query: SELECT dest2.* FROM dest2 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest2 -#### A masked pattern was here #### -165 val_165 -193 val_193 -150 val_150 -128 val_128 -146 val_146 -152 val_152 -145 val_145 -166 val_166 -153 val_153 -193 val_193 -174 val_174 -199 val_199 -174 val_174 -162 val_162 -167 val_167 -195 val_195 -113 val_113 -155 val_155 -128 val_128 -149 val_149 -129 val_129 -170 val_170 -157 val_157 -111 val_111 -169 val_169 -125 val_125 -192 val_192 -187 val_187 -176 val_176 -138 val_138 -103 val_103 -176 val_176 -137 val_137 -180 val_180 -181 val_181 -138 val_138 -179 val_179 -172 val_172 -129 val_129 -158 val_158 -119 val_119 -197 val_197 -100 val_100 -199 val_199 -191 val_191 -165 val_165 -120 val_120 -131 val_131 -156 val_156 -196 val_196 -197 val_197 -187 val_187 -137 val_137 -169 val_169 -179 val_179 -118 val_118 -134 val_134 -138 val_138 -118 val_118 -177 val_177 -168 val_168 -143 val_143 -160 val_160 -195 val_195 -119 val_119 -149 val_149 -138 val_138 -103 val_103 -113 val_113 -167 val_167 -116 val_116 -191 val_191 -128 val_128 -193 val_193 -104 val_104 -175 val_175 -105 val_105 -190 val_190 -114 val_114 -164 val_164 -125 val_125 -164 val_164 -187 val_187 -104 val_104 -163 val_163 -119 val_119 -199 val_199 -120 val_120 -169 val_169 -178 val_178 -136 val_136 -172 val_172 -133 val_133 -175 val_175 -189 val_189 -134 val_134 -100 val_100 -146 val_146 -186 val_186 -167 val_167 -183 val_183 -152 val_152 -194 val_194 -126 val_126 -169 val_169 -PREHOOK: query: SELECT dest3.* FROM dest3 -PREHOOK: type: QUERY -PREHOOK: Input: default@dest3 -PREHOOK: Input: default@dest3@ds=2008-04-08/hr=12 -#### A masked pattern was here #### -POSTHOOK: query: SELECT dest3.* FROM dest3 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest3 -POSTHOOK: Input: default@dest3@ds=2008-04-08/hr=12 -#### A masked pattern was here #### -238 2008-04-08 12 -311 2008-04-08 12 -409 2008-04-08 12 -255 2008-04-08 12 -278 2008-04-08 12 -484 2008-04-08 12 -265 2008-04-08 12 -401 2008-04-08 12 -273 2008-04-08 12 -224 2008-04-08 12 -369 2008-04-08 12 -213 2008-04-08 12 -406 2008-04-08 12 -429 2008-04-08 12 -374 2008-04-08 12 -469 2008-04-08 12 -495 2008-04-08 12 -327 2008-04-08 12 -281 2008-04-08 12 -277 2008-04-08 12 -209 2008-04-08 12 -403 2008-04-08 12 -417 2008-04-08 12 -430 2008-04-08 12 -252 2008-04-08 12 -292 2008-04-08 12 -219 2008-04-08 12 -287 2008-04-08 12 -338 2008-04-08 12 -446 2008-04-08 12 -459 2008-04-08 12 -394 2008-04-08 12 -237 2008-04-08 12 -482 2008-04-08 12 -413 2008-04-08 12 -494 2008-04-08 12 -207 2008-04-08 12 -466 2008-04-08 12 -208 2008-04-08 12 -399 2008-04-08 12 -396 2008-04-08 12 -247 2008-04-08 12 -417 2008-04-08 12 -489 2008-04-08 12 -377 2008-04-08 12 -397 2008-04-08 12 -309 2008-04-08 12 -365 2008-04-08 12 -266 2008-04-08 12 -439 2008-04-08 12 -342 2008-04-08 12 -367 2008-04-08 12 -325 2008-04-08 12 -475 2008-04-08 12 -203 2008-04-08 12 -339 2008-04-08 12 -455 2008-04-08 12 -311 2008-04-08 12 -316 2008-04-08 12 -302 2008-04-08 12 -205 2008-04-08 12 -438 2008-04-08 12 -345 2008-04-08 
12 -489 2008-04-08 12 -378 2008-04-08 12 -221 2008-04-08 12 -280 2008-04-08 12 -427 2008-04-08 12 -277 2008-04-08 12 -208 2008-04-08 12 -356 2008-04-08 12 -399 2008-04-08 12 -382 2008-04-08 12 -498 2008-04-08 12 -386 2008-04-08 12 -437 2008-04-08 12 -469 2008-04-08 12 -286 2008-04-08 12 -459 2008-04-08 12 -239 2008-04-08 12 -213 2008-04-08 12 -216 2008-04-08 12 -430 2008-04-08 12 -278 2008-04-08 12 -289 2008-04-08 12 -221 2008-04-08 12 -318 2008-04-08 12 -332 2008-04-08 12 -311 2008-04-08 12 -275 2008-04-08 12 -241 2008-04-08 12 -333 2008-04-08 12 -284 2008-04-08 12 -230 2008-04-08 12 -260 2008-04-08 12 -404 2008-04-08 12 -384 2008-04-08 12 -489 2008-04-08 12 -353 2008-04-08 12 -373 2008-04-08 12 -272 2008-04-08 12 -217 2008-04-08 12 -348 2008-04-08 12 -466 2008-04-08 12 -411 2008-04-08 12 -230 2008-04-08 12 -208 2008-04-08 12 -348 2008-04-08 12 -463 2008-04-08 12 -431 2008-04-08 12 -496 2008-04-08 12 -322 2008-04-08 12 -468 2008-04-08 12 -393 2008-04-08 12 -454 2008-04-08 12 -298 2008-04-08 12 -418 2008-04-08 12 -327 2008-04-08 12 -230 2008-04-08 12 -205 2008-04-08 12 -404 2008-04-08 12 -436 2008-04-08 12 -469 2008-04-08 12 -468 2008-04-08 12 -308 2008-04-08 12 -288 2008-04-08 12 -481 2008-04-08 12 -457 2008-04-08 12 -282 2008-04-08 12 -318 2008-04-08 12 -318 2008-04-08 12 -409 2008-04-08 12 -470 2008-04-08 12 -369 2008-04-08 12 -316 2008-04-08 12 -413 2008-04-08 12 -490 2008-04-08 12 -364 2008-04-08 12 -395 2008-04-08 12 -282 2008-04-08 12 -238 2008-04-08 12 -419 2008-04-08 12 -307 2008-04-08 12 -435 2008-04-08 12 -277 2008-04-08 12 -273 2008-04-08 12 -306 2008-04-08 12 -224 2008-04-08 12 -309 2008-04-08 12 -389 2008-04-08 12 -327 2008-04-08 12 -242 2008-04-08 12 -369 2008-04-08 12 -392 2008-04-08 12 -272 2008-04-08 12 -331 2008-04-08 12 -401 2008-04-08 12 -242 2008-04-08 12 -452 2008-04-08 12 -226 2008-04-08 12 -497 2008-04-08 12 -402 2008-04-08 12 -396 2008-04-08 12 -317 2008-04-08 12 -395 2008-04-08 12 -336 2008-04-08 12 -229 2008-04-08 12 -233 2008-04-08 12 -472 2008-04-08 12 -322 2008-04-08 12 -498 2008-04-08 12 -321 2008-04-08 12 -430 2008-04-08 12 -489 2008-04-08 12 -458 2008-04-08 12 -223 2008-04-08 12 -492 2008-04-08 12 -449 2008-04-08 12 -218 2008-04-08 12 -228 2008-04-08 12 -453 2008-04-08 12 -209 2008-04-08 12 -468 2008-04-08 12 -342 2008-04-08 12 -230 2008-04-08 12 -368 2008-04-08 12 -296 2008-04-08 12 -216 2008-04-08 12 -367 2008-04-08 12 -344 2008-04-08 12 -274 2008-04-08 12 -219 2008-04-08 12 -239 2008-04-08 12 -485 2008-04-08 12 -223 2008-04-08 12 -256 2008-04-08 12 -263 2008-04-08 12 -487 2008-04-08 12 -480 2008-04-08 12 -401 2008-04-08 12 -288 2008-04-08 12 -244 2008-04-08 12 -438 2008-04-08 12 -467 2008-04-08 12 -432 2008-04-08 12 -202 2008-04-08 12 -316 2008-04-08 12 -229 2008-04-08 12 -469 2008-04-08 12 -463 2008-04-08 12 -280 2008-04-08 12 -283 2008-04-08 12 -331 2008-04-08 12 -235 2008-04-08 12 -321 2008-04-08 12 -335 2008-04-08 12 -466 2008-04-08 12 -366 2008-04-08 12 -403 2008-04-08 12 -483 2008-04-08 12 -257 2008-04-08 12 -406 2008-04-08 12 -409 2008-04-08 12 -406 2008-04-08 12 -401 2008-04-08 12 -258 2008-04-08 12 -203 2008-04-08 12 -262 2008-04-08 12 -348 2008-04-08 12 -424 2008-04-08 12 -396 2008-04-08 12 -201 2008-04-08 12 -217 2008-04-08 12 -431 2008-04-08 12 -454 2008-04-08 12 -478 2008-04-08 12 -298 2008-04-08 12 -431 2008-04-08 12 -424 2008-04-08 12 -382 2008-04-08 12 -397 2008-04-08 12 -480 2008-04-08 12 -291 2008-04-08 12 -351 2008-04-08 12 -255 2008-04-08 12 -438 2008-04-08 12 -414 2008-04-08 12 -200 2008-04-08 12 -491 2008-04-08 12 -237 2008-04-08 
12 -439 2008-04-08 12 -360 2008-04-08 12 -248 2008-04-08 12 -479 2008-04-08 12 -305 2008-04-08 12 -417 2008-04-08 12 -444 2008-04-08 12 -429 2008-04-08 12 -443 2008-04-08 12 -323 2008-04-08 12 -325 2008-04-08 12 -277 2008-04-08 12 -230 2008-04-08 12 -478 2008-04-08 12 -468 2008-04-08 12 -310 2008-04-08 12 -317 2008-04-08 12 -333 2008-04-08 12 -493 2008-04-08 12 -460 2008-04-08 12 -207 2008-04-08 12 -249 2008-04-08 12 -265 2008-04-08 12 -480 2008-04-08 12 -353 2008-04-08 12 -214 2008-04-08 12 -462 2008-04-08 12 -233 2008-04-08 12 -406 2008-04-08 12 -454 2008-04-08 12 -375 2008-04-08 12 -401 2008-04-08 12 -421 2008-04-08 12 -407 2008-04-08 12 -384 2008-04-08 12 -256 2008-04-08 12 -384 2008-04-08 12 -379 2008-04-08 12 -462 2008-04-08 12 -492 2008-04-08 12 -298 2008-04-08 12 -341 2008-04-08 12 -498 2008-04-08 12 -458 2008-04-08 12 -362 2008-04-08 12 -285 2008-04-08 12 -348 2008-04-08 12 -273 2008-04-08 12 -281 2008-04-08 12 -344 2008-04-08 12 -469 2008-04-08 12 -315 2008-04-08 12 -448 2008-04-08 12 -348 2008-04-08 12 -307 2008-04-08 12 -414 2008-04-08 12 -477 2008-04-08 12 -222 2008-04-08 12 -403 2008-04-08 12 -400 2008-04-08 12 -200 2008-04-08 12 diff --git ql/src/test/results/clientpositive/input31.q.out ql/src/test/results/clientpositive/input31.q.out deleted file mode 100644 index 308a5f5243..0000000000 --- ql/src/test/results/clientpositive/input31.q.out +++ /dev/null @@ -1,128 +0,0 @@ -PREHOOK: query: create table tst_dest31(a int) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@tst_dest31 -POSTHOOK: query: create table tst_dest31(a int) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@tst_dest31 -PREHOOK: query: create table dest31(a int) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@dest31 -POSTHOOK: query: create table dest31(a int) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@dest31 -PREHOOK: query: explain -insert overwrite table dest31 -select count(1) from srcbucket -PREHOOK: type: QUERY -POSTHOOK: query: explain -insert overwrite table dest31 -select count(1) from srcbucket -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - Stage-2 depends on stages: Stage-0 - -STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: srcbucket - Statistics: Num rows: 1000 Data size: 10603 Basic stats: COMPLETE Column stats: NONE - Filter Operator - predicate: (((hash(key) & 2147483647) % 2) = 0) (type: boolean) - Statistics: Num rows: 500 Data size: 5301 Basic stats: COMPLETE Column stats: NONE - Select Operator - Statistics: Num rows: 500 Data size: 5301 Basic stats: COMPLETE Column stats: NONE - Group By Operator - aggregations: count() - mode: hash - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - sort order: - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE - value expressions: _col0 (type: bigint) - Execution mode: vectorized - Reduce Operator Tree: - Group By Operator - aggregations: count(VALUE._col0) - mode: mergepartial - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: UDFToInteger(_col0) (type: int) - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE - File Output Operator - 
compressed: false - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.tst_dest31 - Select Operator - expressions: _col0 (type: int) - outputColumnNames: a - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE - Group By Operator - aggregations: compute_stats(a, 'hll') - mode: complete - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 424 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: _col0 (type: struct) - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 424 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - Statistics: Num rows: 1 Data size: 424 Basic stats: COMPLETE Column stats: NONE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - Stage: Stage-0 - Move Operator - tables: - replace: true - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.tst_dest31 - - Stage: Stage-2 - Stats Work - Basic Stats Work: - Column Stats Desc: - Columns: a - Column Types: int - Table: default.tst_dest31 - -PREHOOK: query: insert overwrite table dest31 -select count(1) from srcbucket -PREHOOK: type: QUERY -PREHOOK: Input: default@srcbucket -PREHOOK: Output: default@tst_dest31 -POSTHOOK: query: insert overwrite table dest31 -select count(1) from srcbucket -POSTHOOK: type: QUERY -POSTHOOK: Input: default@srcbucket -POSTHOOK: Output: default@tst_dest31 -POSTHOOK: Lineage: tst_dest31.a EXPRESSION [(srcbucket)srcbucket.null, ] -PREHOOK: query: select * from tst_dest31 -PREHOOK: type: QUERY -PREHOOK: Input: default@tst_dest31 -#### A masked pattern was here #### -POSTHOOK: query: select * from tst_dest31 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@tst_dest31 -#### A masked pattern was here #### -242 diff --git ql/src/test/results/clientpositive/input39_hadoop20.q.out ql/src/test/results/clientpositive/input39_hadoop20.q.out deleted file mode 100644 index d7b92e066b..0000000000 --- ql/src/test/results/clientpositive/input39_hadoop20.q.out +++ /dev/null @@ -1,163 +0,0 @@ -PREHOOK: query: -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.20, 0.20S) - - -create table t1(key string, value string) partitioned by (ds string) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -POSTHOOK: query: -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.20, 0.20S) - - -create table t1(key string, value string) partitioned by (ds string) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@t1 -PREHOOK: query: create table t2(key string, value string) partitioned by (ds string) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -POSTHOOK: query: create table t2(key string, value string) partitioned by (ds string) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@t2 -PREHOOK: query: insert overwrite table t1 partition (ds='1') -select key, value from src -PREHOOK: type: QUERY -PREHOOK: Input: default@src -PREHOOK: Output: default@t1@ds=1 -POSTHOOK: query: insert overwrite table t1 
partition (ds='1') -select key, value from src -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src -POSTHOOK: Output: default@t1@ds=1 -POSTHOOK: Lineage: t1 PARTITION(ds=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: t1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: insert overwrite table t1 partition (ds='2') -select key, value from src -PREHOOK: type: QUERY -PREHOOK: Input: default@src -PREHOOK: Output: default@t1@ds=2 -POSTHOOK: query: insert overwrite table t1 partition (ds='2') -select key, value from src -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src -POSTHOOK: Output: default@t1@ds=2 -POSTHOOK: Lineage: t1 PARTITION(ds=2).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: t1 PARTITION(ds=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: insert overwrite table t2 partition (ds='1') -select key, value from src -PREHOOK: type: QUERY -PREHOOK: Input: default@src -PREHOOK: Output: default@t2@ds=1 -POSTHOOK: query: insert overwrite table t2 partition (ds='1') -select key, value from src -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src -POSTHOOK: Output: default@t2@ds=1 -POSTHOOK: Lineage: t2 PARTITION(ds=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: t2 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: explain -select count(1) from t1 join t2 on t1.key=t2.key where t1.ds='1' and t2.ds='1' -PREHOOK: type: QUERY -POSTHOOK: query: explain -select count(1) from t1 join t2 on t1.key=t2.key where t1.ds='1' and t2.ds='1' -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-2 depends on stages: Stage-1 - Stage-0 depends on stages: Stage-2 - -STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: t2 - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - Filter Operator - predicate: ((((hash(rand(460476415)) & 2147483647) % 32) = 0) and key is not null) (type: boolean) - Statistics: Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: key (type: string) - sort order: + - Map-reduce partition columns: key (type: string) - Statistics: Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE - TableScan - alias: t1 - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - Filter Operator - predicate: ((((hash(rand(460476415)) & 2147483647) % 32) = 0) and key is not null) (type: boolean) - Statistics: Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: key (type: string) - sort order: + - Map-reduce partition columns: key (type: string) - Statistics: Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE - Reduce Operator Tree: - Join Operator - condition map: - Inner Join 0 to 1 - condition expressions: - 0 - 1 - Statistics: Num rows: 137 Data size: 1460 Basic stats: COMPLETE Column stats: NONE - Select Operator - Statistics: Num rows: 137 Data size: 1460 Basic stats: COMPLETE Column stats: NONE - Group By Operator - aggregations: count(1) - mode: hash - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - 
table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - - Stage: Stage-2 - Map Reduce - Map Operator Tree: - TableScan - Reduce Output Operator - sort order: - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE - value expressions: _col0 (type: bigint) - Reduce Operator Tree: - Group By Operator - aggregations: count(VALUE._col0) - mode: mergepartial - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: _col0 (type: bigint) - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - ListSink - -PREHOOK: query: select count(1) from t1 join t2 on t1.key=t2.key where t1.ds='1' and t2.ds='1' -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@ds=1 -PREHOOK: Input: default@t2 -PREHOOK: Input: default@t2@ds=1 -#### A masked pattern was here #### -POSTHOOK: query: select count(1) from t1 join t2 on t1.key=t2.key where t1.ds='1' and t2.ds='1' -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@ds=1 -POSTHOOK: Input: default@t2 -POSTHOOK: Input: default@t2@ds=1 -#### A masked pattern was here #### -18 -mapred.job.tracker=localhost:58 diff --git ql/src/test/results/clientpositive/join14_hadoop20.q.out ql/src/test/results/clientpositive/join14_hadoop20.q.out deleted file mode 100644 index 9f759768de..0000000000 --- ql/src/test/results/clientpositive/join14_hadoop20.q.out +++ /dev/null @@ -1,1864 +0,0 @@ -PREHOOK: query: -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.20, 0.20S) - -CREATE TABLE dest1(c1 INT, c2 STRING) STORED AS TEXTFILE -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -POSTHOOK: query: -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.20, 0.20S) - -CREATE TABLE dest1(c1 INT, c2 STRING) STORED AS TEXTFILE -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@dest1 -PREHOOK: query: EXPLAIN -FROM src JOIN srcpart ON src.key = srcpart.key AND srcpart.ds = '2008-04-08' and src.key > 100 -INSERT OVERWRITE TABLE dest1 SELECT src.key, srcpart.value -PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN -FROM src JOIN srcpart ON src.key = srcpart.key AND srcpart.ds = '2008-04-08' and src.key > 100 -INSERT OVERWRITE TABLE dest1 SELECT src.key, srcpart.value -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - Stage-2 depends on stages: Stage-0 - -STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: srcpart - Statistics: Num rows: 58 Data size: 11624 Basic stats: COMPLETE Column stats: NONE - Filter Operator - predicate: ((key > 100) and key is not null) (type: boolean) - Statistics: Num rows: 19 Data size: 3807 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: key (type: string) - sort order: + - Map-reduce partition columns: key (type: string) - Statistics: Num rows: 19 Data size: 3807 Basic stats: 
COMPLETE Column stats: NONE - value expressions: value (type: string) - TableScan - alias: src - Statistics: Num rows: 58 Data size: 5812 Basic stats: COMPLETE Column stats: NONE - Filter Operator - predicate: ((key > 100) and key is not null) (type: boolean) - Statistics: Num rows: 19 Data size: 1903 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: key (type: string) - sort order: + - Map-reduce partition columns: key (type: string) - Statistics: Num rows: 19 Data size: 1903 Basic stats: COMPLETE Column stats: NONE - Reduce Operator Tree: - Join Operator - condition map: - Inner Join 0 to 1 - condition expressions: - 0 {KEY.reducesinkkey0} - 1 {VALUE._col0} - outputColumnNames: _col0, _col5 - Statistics: Num rows: 20 Data size: 2093 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: UDFToInteger(_col0) (type: int), _col5 (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 20 Data size: 2093 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - Statistics: Num rows: 20 Data size: 2093 Basic stats: COMPLETE Column stats: NONE - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 - - Stage: Stage-0 - Move Operator - tables: - replace: true - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 - - Stage: Stage-2 - Stats-Aggr Operator - -PREHOOK: query: FROM src JOIN srcpart ON src.key = srcpart.key AND srcpart.ds = '2008-04-08' and src.key > 100 -INSERT OVERWRITE TABLE dest1 SELECT src.key, srcpart.value -PREHOOK: type: QUERY -PREHOOK: Input: default@src -PREHOOK: Input: default@srcpart -PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 -PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 -PREHOOK: Output: default@dest1 -POSTHOOK: query: FROM src JOIN srcpart ON src.key = srcpart.key AND srcpart.ds = '2008-04-08' and src.key > 100 -INSERT OVERWRITE TABLE dest1 SELECT src.key, srcpart.value -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src -POSTHOOK: Input: default@srcpart -POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 -POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 -POSTHOOK: Output: default@dest1 -POSTHOOK: Lineage: dest1.c1 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.c2 SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: select dest1.* from dest1 -PREHOOK: type: QUERY -PREHOOK: Input: default@dest1 -#### A masked pattern was here #### -POSTHOOK: query: select dest1.* from dest1 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest1 -#### A masked pattern was here #### -103 val_103 -103 val_103 -103 val_103 -103 val_103 -103 val_103 -103 val_103 -103 val_103 -103 val_103 -104 val_104 -104 val_104 -104 val_104 -104 val_104 -104 val_104 -104 val_104 -104 val_104 -104 val_104 -105 val_105 -105 val_105 -111 val_111 -111 val_111 -113 val_113 -113 val_113 -113 val_113 -113 val_113 -113 val_113 -113 val_113 -113 val_113 -113 val_113 -114 val_114 -114 val_114 -116 val_116 -116 val_116 -118 val_118 -118 val_118 -118 val_118 -118 val_118 -118 val_118 -118 val_118 -118 val_118 -118 val_118 -119 val_119 -119 val_119 -119 val_119 -119 
val_119 -119 val_119 -119 val_119 -119 val_119 -119 val_119 -119 val_119 -119 val_119 -119 val_119 -119 val_119 -119 val_119 -119 val_119 -119 val_119 -119 val_119 -119 val_119 -119 val_119 -120 val_120 -120 val_120 -120 val_120 -120 val_120 -120 val_120 -120 val_120 -120 val_120 -120 val_120 -125 val_125 -125 val_125 -125 val_125 -125 val_125 -125 val_125 -125 val_125 -125 val_125 -125 val_125 -126 val_126 -126 val_126 -128 val_128 -128 val_128 -128 val_128 -128 val_128 -128 val_128 -128 val_128 -128 val_128 -128 val_128 -128 val_128 -128 val_128 -128 val_128 -128 val_128 -128 val_128 -128 val_128 -128 val_128 -128 val_128 -128 val_128 -128 val_128 -129 val_129 -129 val_129 -129 val_129 -129 val_129 -129 val_129 -129 val_129 -129 val_129 -129 val_129 -131 val_131 -131 val_131 -133 val_133 -133 val_133 -134 val_134 -134 val_134 -134 val_134 -134 val_134 -134 val_134 -134 val_134 -134 val_134 -134 val_134 -136 val_136 -136 val_136 -137 val_137 -137 val_137 -137 val_137 -137 val_137 -137 val_137 -137 val_137 -137 val_137 -137 val_137 -138 val_138 -138 val_138 -138 val_138 -138 val_138 -138 val_138 -138 val_138 -138 val_138 -138 val_138 -138 val_138 -138 val_138 -138 val_138 -138 val_138 -138 val_138 -138 val_138 -138 val_138 -138 val_138 -138 val_138 -138 val_138 -138 val_138 -138 val_138 -138 val_138 -138 val_138 -138 val_138 -138 val_138 -138 val_138 -138 val_138 -138 val_138 -138 val_138 -138 val_138 -138 val_138 -138 val_138 -138 val_138 -143 val_143 -143 val_143 -145 val_145 -145 val_145 -146 val_146 -146 val_146 -146 val_146 -146 val_146 -146 val_146 -146 val_146 -146 val_146 -146 val_146 -149 val_149 -149 val_149 -149 val_149 -149 val_149 -149 val_149 -149 val_149 -149 val_149 -149 val_149 -150 val_150 -150 val_150 -152 val_152 -152 val_152 -152 val_152 -152 val_152 -152 val_152 -152 val_152 -152 val_152 -152 val_152 -153 val_153 -153 val_153 -155 val_155 -155 val_155 -156 val_156 -156 val_156 -157 val_157 -157 val_157 -158 val_158 -158 val_158 -160 val_160 -160 val_160 -162 val_162 -162 val_162 -163 val_163 -163 val_163 -164 val_164 -164 val_164 -164 val_164 -164 val_164 -164 val_164 -164 val_164 -164 val_164 -164 val_164 -165 val_165 -165 val_165 -165 val_165 -165 val_165 -165 val_165 -165 val_165 -165 val_165 -165 val_165 -166 val_166 -166 val_166 -167 val_167 -167 val_167 -167 val_167 -167 val_167 -167 val_167 -167 val_167 -167 val_167 -167 val_167 -167 val_167 -167 val_167 -167 val_167 -167 val_167 -167 val_167 -167 val_167 -167 val_167 -167 val_167 -167 val_167 -167 val_167 -168 val_168 -168 val_168 -169 val_169 -169 val_169 -169 val_169 -169 val_169 -169 val_169 -169 val_169 -169 val_169 -169 val_169 -169 val_169 -169 val_169 -169 val_169 -169 val_169 -169 val_169 -169 val_169 -169 val_169 -169 val_169 -169 val_169 -169 val_169 -169 val_169 -169 val_169 -169 val_169 -169 val_169 -169 val_169 -169 val_169 -169 val_169 -169 val_169 -169 val_169 -169 val_169 -169 val_169 -169 val_169 -169 val_169 -169 val_169 -170 val_170 -170 val_170 -172 val_172 -172 val_172 -172 val_172 -172 val_172 -172 val_172 -172 val_172 -172 val_172 -172 val_172 -174 val_174 -174 val_174 -174 val_174 -174 val_174 -174 val_174 -174 val_174 -174 val_174 -174 val_174 -175 val_175 -175 val_175 -175 val_175 -175 val_175 -175 val_175 -175 val_175 -175 val_175 -175 val_175 -176 val_176 -176 val_176 -176 val_176 -176 val_176 -176 val_176 -176 val_176 -176 val_176 -176 val_176 -177 val_177 -177 val_177 -178 val_178 -178 val_178 -179 val_179 -179 val_179 -179 val_179 -179 val_179 -179 val_179 -179 val_179 -179 
val_179 -179 val_179 -180 val_180 -180 val_180 -181 val_181 -181 val_181 -183 val_183 -183 val_183 -186 val_186 -186 val_186 -187 val_187 -187 val_187 -187 val_187 -187 val_187 -187 val_187 -187 val_187 -187 val_187 -187 val_187 -187 val_187 -187 val_187 -187 val_187 -187 val_187 -187 val_187 -187 val_187 -187 val_187 -187 val_187 -187 val_187 -187 val_187 -189 val_189 -189 val_189 -190 val_190 -190 val_190 -191 val_191 -191 val_191 -191 val_191 -191 val_191 -191 val_191 -191 val_191 -191 val_191 -191 val_191 -192 val_192 -192 val_192 -193 val_193 -193 val_193 -193 val_193 -193 val_193 -193 val_193 -193 val_193 -193 val_193 -193 val_193 -193 val_193 -193 val_193 -193 val_193 -193 val_193 -193 val_193 -193 val_193 -193 val_193 -193 val_193 -193 val_193 -193 val_193 -194 val_194 -194 val_194 -195 val_195 -195 val_195 -195 val_195 -195 val_195 -195 val_195 -195 val_195 -195 val_195 -195 val_195 -196 val_196 -196 val_196 -197 val_197 -197 val_197 -197 val_197 -197 val_197 -197 val_197 -197 val_197 -197 val_197 -197 val_197 -199 val_199 -199 val_199 -199 val_199 -199 val_199 -199 val_199 -199 val_199 -199 val_199 -199 val_199 -199 val_199 -199 val_199 -199 val_199 -199 val_199 -199 val_199 -199 val_199 -199 val_199 -199 val_199 -199 val_199 -199 val_199 -200 val_200 -200 val_200 -200 val_200 -200 val_200 -200 val_200 -200 val_200 -200 val_200 -200 val_200 -201 val_201 -201 val_201 -202 val_202 -202 val_202 -203 val_203 -203 val_203 -203 val_203 -203 val_203 -203 val_203 -203 val_203 -203 val_203 -203 val_203 -205 val_205 -205 val_205 -205 val_205 -205 val_205 -205 val_205 -205 val_205 -205 val_205 -205 val_205 -207 val_207 -207 val_207 -207 val_207 -207 val_207 -207 val_207 -207 val_207 -207 val_207 -207 val_207 -208 val_208 -208 val_208 -208 val_208 -208 val_208 -208 val_208 -208 val_208 -208 val_208 -208 val_208 -208 val_208 -208 val_208 -208 val_208 -208 val_208 -208 val_208 -208 val_208 -208 val_208 -208 val_208 -208 val_208 -208 val_208 -209 val_209 -209 val_209 -209 val_209 -209 val_209 -209 val_209 -209 val_209 -209 val_209 -209 val_209 -213 val_213 -213 val_213 -213 val_213 -213 val_213 -213 val_213 -213 val_213 -213 val_213 -213 val_213 -214 val_214 -214 val_214 -216 val_216 -216 val_216 -216 val_216 -216 val_216 -216 val_216 -216 val_216 -216 val_216 -216 val_216 -217 val_217 -217 val_217 -217 val_217 -217 val_217 -217 val_217 -217 val_217 -217 val_217 -217 val_217 -218 val_218 -218 val_218 -219 val_219 -219 val_219 -219 val_219 -219 val_219 -219 val_219 -219 val_219 -219 val_219 -219 val_219 -221 val_221 -221 val_221 -221 val_221 -221 val_221 -221 val_221 -221 val_221 -221 val_221 -221 val_221 -222 val_222 -222 val_222 -223 val_223 -223 val_223 -223 val_223 -223 val_223 -223 val_223 -223 val_223 -223 val_223 -223 val_223 -224 val_224 -224 val_224 -224 val_224 -224 val_224 -224 val_224 -224 val_224 -224 val_224 -224 val_224 -226 val_226 -226 val_226 -228 val_228 -228 val_228 -229 val_229 -229 val_229 -229 val_229 -229 val_229 -229 val_229 -229 val_229 -229 val_229 -229 val_229 -230 val_230 -230 val_230 -230 val_230 -230 val_230 -230 val_230 -230 val_230 -230 val_230 -230 val_230 -230 val_230 -230 val_230 -230 val_230 -230 val_230 -230 val_230 -230 val_230 -230 val_230 -230 val_230 -230 val_230 -230 val_230 -230 val_230 -230 val_230 -230 val_230 -230 val_230 -230 val_230 -230 val_230 -230 val_230 -230 val_230 -230 val_230 -230 val_230 -230 val_230 -230 val_230 -230 val_230 -230 val_230 -230 val_230 -230 val_230 -230 val_230 -230 val_230 -230 val_230 -230 val_230 -230 val_230 -230 
val_230 -230 val_230 -230 val_230 -230 val_230 -230 val_230 -230 val_230 -230 val_230 -230 val_230 -230 val_230 -230 val_230 -230 val_230 -233 val_233 -233 val_233 -233 val_233 -233 val_233 -233 val_233 -233 val_233 -233 val_233 -233 val_233 -235 val_235 -235 val_235 -237 val_237 -237 val_237 -237 val_237 -237 val_237 -237 val_237 -237 val_237 -237 val_237 -237 val_237 -238 val_238 -238 val_238 -238 val_238 -238 val_238 -238 val_238 -238 val_238 -238 val_238 -238 val_238 -239 val_239 -239 val_239 -239 val_239 -239 val_239 -239 val_239 -239 val_239 -239 val_239 -239 val_239 -241 val_241 -241 val_241 -242 val_242 -242 val_242 -242 val_242 -242 val_242 -242 val_242 -242 val_242 -242 val_242 -242 val_242 -244 val_244 -244 val_244 -247 val_247 -247 val_247 -248 val_248 -248 val_248 -249 val_249 -249 val_249 -252 val_252 -252 val_252 -255 val_255 -255 val_255 -255 val_255 -255 val_255 -255 val_255 -255 val_255 -255 val_255 -255 val_255 -256 val_256 -256 val_256 -256 val_256 -256 val_256 -256 val_256 -256 val_256 -256 val_256 -256 val_256 -257 val_257 -257 val_257 -258 val_258 -258 val_258 -260 val_260 -260 val_260 -262 val_262 -262 val_262 -263 val_263 -263 val_263 -265 val_265 -265 val_265 -265 val_265 -265 val_265 -265 val_265 -265 val_265 -265 val_265 -265 val_265 -266 val_266 -266 val_266 -272 val_272 -272 val_272 -272 val_272 -272 val_272 -272 val_272 -272 val_272 -272 val_272 -272 val_272 -273 val_273 -273 val_273 -273 val_273 -273 val_273 -273 val_273 -273 val_273 -273 val_273 -273 val_273 -273 val_273 -273 val_273 -273 val_273 -273 val_273 -273 val_273 -273 val_273 -273 val_273 -273 val_273 -273 val_273 -273 val_273 -274 val_274 -274 val_274 -275 val_275 -275 val_275 -277 val_277 -277 val_277 -277 val_277 -277 val_277 -277 val_277 -277 val_277 -277 val_277 -277 val_277 -277 val_277 -277 val_277 -277 val_277 -277 val_277 -277 val_277 -277 val_277 -277 val_277 -277 val_277 -277 val_277 -277 val_277 -277 val_277 -277 val_277 -277 val_277 -277 val_277 -277 val_277 -277 val_277 -277 val_277 -277 val_277 -277 val_277 -277 val_277 -277 val_277 -277 val_277 -277 val_277 -277 val_277 -278 val_278 -278 val_278 -278 val_278 -278 val_278 -278 val_278 -278 val_278 -278 val_278 -278 val_278 -280 val_280 -280 val_280 -280 val_280 -280 val_280 -280 val_280 -280 val_280 -280 val_280 -280 val_280 -281 val_281 -281 val_281 -281 val_281 -281 val_281 -281 val_281 -281 val_281 -281 val_281 -281 val_281 -282 val_282 -282 val_282 -282 val_282 -282 val_282 -282 val_282 -282 val_282 -282 val_282 -282 val_282 -283 val_283 -283 val_283 -284 val_284 -284 val_284 -285 val_285 -285 val_285 -286 val_286 -286 val_286 -287 val_287 -287 val_287 -288 val_288 -288 val_288 -288 val_288 -288 val_288 -288 val_288 -288 val_288 -288 val_288 -288 val_288 -289 val_289 -289 val_289 -291 val_291 -291 val_291 -292 val_292 -292 val_292 -296 val_296 -296 val_296 -298 val_298 -298 val_298 -298 val_298 -298 val_298 -298 val_298 -298 val_298 -298 val_298 -298 val_298 -298 val_298 -298 val_298 -298 val_298 -298 val_298 -298 val_298 -298 val_298 -298 val_298 -298 val_298 -298 val_298 -298 val_298 -302 val_302 -302 val_302 -305 val_305 -305 val_305 -306 val_306 -306 val_306 -307 val_307 -307 val_307 -307 val_307 -307 val_307 -307 val_307 -307 val_307 -307 val_307 -307 val_307 -308 val_308 -308 val_308 -309 val_309 -309 val_309 -309 val_309 -309 val_309 -309 val_309 -309 val_309 -309 val_309 -309 val_309 -310 val_310 -310 val_310 -311 val_311 -311 val_311 -311 val_311 -311 val_311 -311 val_311 -311 val_311 -311 val_311 -311 val_311 -311 
val_311 -311 val_311 -311 val_311 -311 val_311 -311 val_311 -311 val_311 -311 val_311 -311 val_311 -311 val_311 -311 val_311 -315 val_315 -315 val_315 -316 val_316 -316 val_316 -316 val_316 -316 val_316 -316 val_316 -316 val_316 -316 val_316 -316 val_316 -316 val_316 -316 val_316 -316 val_316 -316 val_316 -316 val_316 -316 val_316 -316 val_316 -316 val_316 -316 val_316 -316 val_316 -317 val_317 -317 val_317 -317 val_317 -317 val_317 -317 val_317 -317 val_317 -317 val_317 -317 val_317 -318 val_318 -318 val_318 -318 val_318 -318 val_318 -318 val_318 -318 val_318 -318 val_318 -318 val_318 -318 val_318 -318 val_318 -318 val_318 -318 val_318 -318 val_318 -318 val_318 -318 val_318 -318 val_318 -318 val_318 -318 val_318 -321 val_321 -321 val_321 -321 val_321 -321 val_321 -321 val_321 -321 val_321 -321 val_321 -321 val_321 -322 val_322 -322 val_322 -322 val_322 -322 val_322 -322 val_322 -322 val_322 -322 val_322 -322 val_322 -323 val_323 -323 val_323 -325 val_325 -325 val_325 -325 val_325 -325 val_325 -325 val_325 -325 val_325 -325 val_325 -325 val_325 -327 val_327 -327 val_327 -327 val_327 -327 val_327 -327 val_327 -327 val_327 -327 val_327 -327 val_327 -327 val_327 -327 val_327 -327 val_327 -327 val_327 -327 val_327 -327 val_327 -327 val_327 -327 val_327 -327 val_327 -327 val_327 -331 val_331 -331 val_331 -331 val_331 -331 val_331 -331 val_331 -331 val_331 -331 val_331 -331 val_331 -332 val_332 -332 val_332 -333 val_333 -333 val_333 -333 val_333 -333 val_333 -333 val_333 -333 val_333 -333 val_333 -333 val_333 -335 val_335 -335 val_335 -336 val_336 -336 val_336 -338 val_338 -338 val_338 -339 val_339 -339 val_339 -341 val_341 -341 val_341 -342 val_342 -342 val_342 -342 val_342 -342 val_342 -342 val_342 -342 val_342 -342 val_342 -342 val_342 -344 val_344 -344 val_344 -344 val_344 -344 val_344 -344 val_344 -344 val_344 -344 val_344 -344 val_344 -345 val_345 -345 val_345 -348 val_348 -348 val_348 -348 val_348 -348 val_348 -348 val_348 -348 val_348 -348 val_348 -348 val_348 -348 val_348 -348 val_348 -348 val_348 -348 val_348 -348 val_348 -348 val_348 -348 val_348 -348 val_348 -348 val_348 -348 val_348 -348 val_348 -348 val_348 -348 val_348 -348 val_348 -348 val_348 -348 val_348 -348 val_348 -348 val_348 -348 val_348 -348 val_348 -348 val_348 -348 val_348 -348 val_348 -348 val_348 -348 val_348 -348 val_348 -348 val_348 -348 val_348 -348 val_348 -348 val_348 -348 val_348 -348 val_348 -348 val_348 -348 val_348 -348 val_348 -348 val_348 -348 val_348 -348 val_348 -348 val_348 -348 val_348 -348 val_348 -348 val_348 -351 val_351 -351 val_351 -353 val_353 -353 val_353 -353 val_353 -353 val_353 -353 val_353 -353 val_353 -353 val_353 -353 val_353 -356 val_356 -356 val_356 -360 val_360 -360 val_360 -362 val_362 -362 val_362 -364 val_364 -364 val_364 -365 val_365 -365 val_365 -366 val_366 -366 val_366 -367 val_367 -367 val_367 -367 val_367 -367 val_367 -367 val_367 -367 val_367 -367 val_367 -367 val_367 -368 val_368 -368 val_368 -369 val_369 -369 val_369 -369 val_369 -369 val_369 -369 val_369 -369 val_369 -369 val_369 -369 val_369 -369 val_369 -369 val_369 -369 val_369 -369 val_369 -369 val_369 -369 val_369 -369 val_369 -369 val_369 -369 val_369 -369 val_369 -373 val_373 -373 val_373 -374 val_374 -374 val_374 -375 val_375 -375 val_375 -377 val_377 -377 val_377 -378 val_378 -378 val_378 -379 val_379 -379 val_379 -382 val_382 -382 val_382 -382 val_382 -382 val_382 -382 val_382 -382 val_382 -382 val_382 -382 val_382 -384 val_384 -384 val_384 -384 val_384 -384 val_384 -384 val_384 -384 val_384 -384 val_384 -384 
val_384 -384 val_384 -384 val_384 -384 val_384 -384 val_384 -384 val_384 -384 val_384 -384 val_384 -384 val_384 -384 val_384 -384 val_384 -386 val_386 -386 val_386 -389 val_389 -389 val_389 -392 val_392 -392 val_392 -393 val_393 -393 val_393 -394 val_394 -394 val_394 -395 val_395 -395 val_395 -395 val_395 -395 val_395 -395 val_395 -395 val_395 -395 val_395 -395 val_395 -396 val_396 -396 val_396 -396 val_396 -396 val_396 -396 val_396 -396 val_396 -396 val_396 -396 val_396 -396 val_396 -396 val_396 -396 val_396 -396 val_396 -396 val_396 -396 val_396 -396 val_396 -396 val_396 -396 val_396 -396 val_396 -397 val_397 -397 val_397 -397 val_397 -397 val_397 -397 val_397 -397 val_397 -397 val_397 -397 val_397 -399 val_399 -399 val_399 -399 val_399 -399 val_399 -399 val_399 -399 val_399 -399 val_399 -399 val_399 -400 val_400 -400 val_400 -401 val_401 -401 val_401 -401 val_401 -401 val_401 -401 val_401 -401 val_401 -401 val_401 -401 val_401 -401 val_401 -401 val_401 -401 val_401 -401 val_401 -401 val_401 -401 val_401 -401 val_401 -401 val_401 -401 val_401 -401 val_401 -401 val_401 -401 val_401 -401 val_401 -401 val_401 -401 val_401 -401 val_401 -401 val_401 -401 val_401 -401 val_401 -401 val_401 -401 val_401 -401 val_401 -401 val_401 -401 val_401 -401 val_401 -401 val_401 -401 val_401 -401 val_401 -401 val_401 -401 val_401 -401 val_401 -401 val_401 -401 val_401 -401 val_401 -401 val_401 -401 val_401 -401 val_401 -401 val_401 -401 val_401 -401 val_401 -401 val_401 -401 val_401 -402 val_402 -402 val_402 -403 val_403 -403 val_403 -403 val_403 -403 val_403 -403 val_403 -403 val_403 -403 val_403 -403 val_403 -403 val_403 -403 val_403 -403 val_403 -403 val_403 -403 val_403 -403 val_403 -403 val_403 -403 val_403 -403 val_403 -403 val_403 -404 val_404 -404 val_404 -404 val_404 -404 val_404 -404 val_404 -404 val_404 -404 val_404 -404 val_404 -406 val_406 -406 val_406 -406 val_406 -406 val_406 -406 val_406 -406 val_406 -406 val_406 -406 val_406 -406 val_406 -406 val_406 -406 val_406 -406 val_406 -406 val_406 -406 val_406 -406 val_406 -406 val_406 -406 val_406 -406 val_406 -406 val_406 -406 val_406 -406 val_406 -406 val_406 -406 val_406 -406 val_406 -406 val_406 -406 val_406 -406 val_406 -406 val_406 -406 val_406 -406 val_406 -406 val_406 -406 val_406 -407 val_407 -407 val_407 -409 val_409 -409 val_409 -409 val_409 -409 val_409 -409 val_409 -409 val_409 -409 val_409 -409 val_409 -409 val_409 -409 val_409 -409 val_409 -409 val_409 -409 val_409 -409 val_409 -409 val_409 -409 val_409 -409 val_409 -409 val_409 -411 val_411 -411 val_411 -413 val_413 -413 val_413 -413 val_413 -413 val_413 -413 val_413 -413 val_413 -413 val_413 -413 val_413 -414 val_414 -414 val_414 -414 val_414 -414 val_414 -414 val_414 -414 val_414 -414 val_414 -414 val_414 -417 val_417 -417 val_417 -417 val_417 -417 val_417 -417 val_417 -417 val_417 -417 val_417 -417 val_417 -417 val_417 -417 val_417 -417 val_417 -417 val_417 -417 val_417 -417 val_417 -417 val_417 -417 val_417 -417 val_417 -417 val_417 -418 val_418 -418 val_418 -419 val_419 -419 val_419 -421 val_421 -421 val_421 -424 val_424 -424 val_424 -424 val_424 -424 val_424 -424 val_424 -424 val_424 -424 val_424 -424 val_424 -427 val_427 -427 val_427 -429 val_429 -429 val_429 -429 val_429 -429 val_429 -429 val_429 -429 val_429 -429 val_429 -429 val_429 -430 val_430 -430 val_430 -430 val_430 -430 val_430 -430 val_430 -430 val_430 -430 val_430 -430 val_430 -430 val_430 -430 val_430 -430 val_430 -430 val_430 -430 val_430 -430 val_430 -430 val_430 -430 val_430 -430 val_430 -430 val_430 -431 
val_431 -431 val_431 -431 val_431 -431 val_431 -431 val_431 -431 val_431 -431 val_431 -431 val_431 -431 val_431 -431 val_431 -431 val_431 -431 val_431 -431 val_431 -431 val_431 -431 val_431 -431 val_431 -431 val_431 -431 val_431 -432 val_432 -432 val_432 -435 val_435 -435 val_435 -436 val_436 -436 val_436 -437 val_437 -437 val_437 -438 val_438 -438 val_438 -438 val_438 -438 val_438 -438 val_438 -438 val_438 -438 val_438 -438 val_438 -438 val_438 -438 val_438 -438 val_438 -438 val_438 -438 val_438 -438 val_438 -438 val_438 -438 val_438 -438 val_438 -438 val_438 -439 val_439 -439 val_439 -439 val_439 -439 val_439 -439 val_439 -439 val_439 -439 val_439 -439 val_439 -443 val_443 -443 val_443 -444 val_444 -444 val_444 -446 val_446 -446 val_446 -448 val_448 -448 val_448 -449 val_449 -449 val_449 -452 val_452 -452 val_452 -453 val_453 -453 val_453 -454 val_454 -454 val_454 -454 val_454 -454 val_454 -454 val_454 -454 val_454 -454 val_454 -454 val_454 -454 val_454 -454 val_454 -454 val_454 -454 val_454 -454 val_454 -454 val_454 -454 val_454 -454 val_454 -454 val_454 -454 val_454 -455 val_455 -455 val_455 -457 val_457 -457 val_457 -458 val_458 -458 val_458 -458 val_458 -458 val_458 -458 val_458 -458 val_458 -458 val_458 -458 val_458 -459 val_459 -459 val_459 -459 val_459 -459 val_459 -459 val_459 -459 val_459 -459 val_459 -459 val_459 -460 val_460 -460 val_460 -462 val_462 -462 val_462 -462 val_462 -462 val_462 -462 val_462 -462 val_462 -462 val_462 -462 val_462 -463 val_463 -463 val_463 -463 val_463 -463 val_463 -463 val_463 -463 val_463 -463 val_463 -463 val_463 -466 val_466 -466 val_466 -466 val_466 -466 val_466 -466 val_466 -466 val_466 -466 val_466 -466 val_466 -466 val_466 -466 val_466 -466 val_466 -466 val_466 -466 val_466 -466 val_466 -466 val_466 -466 val_466 -466 val_466 -466 val_466 -467 val_467 -467 val_467 -468 val_468 -468 val_468 -468 val_468 -468 val_468 -468 val_468 -468 val_468 -468 val_468 -468 val_468 -468 val_468 -468 val_468 -468 val_468 -468 val_468 -468 val_468 -468 val_468 -468 val_468 -468 val_468 -468 val_468 -468 val_468 -468 val_468 -468 val_468 -468 val_468 -468 val_468 -468 val_468 -468 val_468 -468 val_468 -468 val_468 -468 val_468 -468 val_468 -468 val_468 -468 val_468 -468 val_468 -468 val_468 -469 val_469 -469 val_469 -469 val_469 -469 val_469 -469 val_469 -469 val_469 -469 val_469 -469 val_469 -469 val_469 -469 val_469 -469 val_469 -469 val_469 -469 val_469 -469 val_469 -469 val_469 -469 val_469 -469 val_469 -469 val_469 -469 val_469 -469 val_469 -469 val_469 -469 val_469 -469 val_469 -469 val_469 -469 val_469 -469 val_469 -469 val_469 -469 val_469 -469 val_469 -469 val_469 -469 val_469 -469 val_469 -469 val_469 -469 val_469 -469 val_469 -469 val_469 -469 val_469 -469 val_469 -469 val_469 -469 val_469 -469 val_469 -469 val_469 -469 val_469 -469 val_469 -469 val_469 -469 val_469 -469 val_469 -469 val_469 -469 val_469 -469 val_469 -470 val_470 -470 val_470 -472 val_472 -472 val_472 -475 val_475 -475 val_475 -477 val_477 -477 val_477 -478 val_478 -478 val_478 -478 val_478 -478 val_478 -478 val_478 -478 val_478 -478 val_478 -478 val_478 -479 val_479 -479 val_479 -480 val_480 -480 val_480 -480 val_480 -480 val_480 -480 val_480 -480 val_480 -480 val_480 -480 val_480 -480 val_480 -480 val_480 -480 val_480 -480 val_480 -480 val_480 -480 val_480 -480 val_480 -480 val_480 -480 val_480 -480 val_480 -481 val_481 -481 val_481 -482 val_482 -482 val_482 -483 val_483 -483 val_483 -484 val_484 -484 val_484 -485 val_485 -485 val_485 -487 val_487 -487 val_487 -489 val_489 -489 
val_489 -489 val_489 -489 val_489 -489 val_489 -489 val_489 -489 val_489 -489 val_489 -489 val_489 -489 val_489 -489 val_489 -489 val_489 -489 val_489 -489 val_489 -489 val_489 -489 val_489 -489 val_489 -489 val_489 -489 val_489 -489 val_489 -489 val_489 -489 val_489 -489 val_489 -489 val_489 -489 val_489 -489 val_489 -489 val_489 -489 val_489 -489 val_489 -489 val_489 -489 val_489 -489 val_489 -490 val_490 -490 val_490 -491 val_491 -491 val_491 -492 val_492 -492 val_492 -492 val_492 -492 val_492 -492 val_492 -492 val_492 -492 val_492 -492 val_492 -493 val_493 -493 val_493 -494 val_494 -494 val_494 -495 val_495 -495 val_495 -496 val_496 -496 val_496 -497 val_497 -497 val_497 -498 val_498 -498 val_498 -498 val_498 -498 val_498 -498 val_498 -498 val_498 -498 val_498 -498 val_498 -498 val_498 -498 val_498 -498 val_498 -498 val_498 -498 val_498 -498 val_498 -498 val_498 -498 val_498 -498 val_498 -498 val_498 diff --git ql/src/test/results/clientpositive/join45X.q.out ql/src/test/results/clientpositive/join45X.q.out deleted file mode 100644 index 880a2e3a3f..0000000000 --- ql/src/test/results/clientpositive/join45X.q.out +++ /dev/null @@ -1,256 +0,0 @@ -Warning: Shuffle Join JOIN[8][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product -PREHOOK: query: EXPLAIN -SELECT * -FROM src1 JOIN src -ON (src1.key= 100 and src.key=100) -LIMIT 10 -PREHOOK: type: QUERY -PREHOOK: Input: default@src -PREHOOK: Input: default@src1 -#### A masked pattern was here #### -POSTHOOK: query: EXPLAIN -SELECT * -FROM src1 JOIN src -ON (src1.key= 100 and src.key=100) -LIMIT 10 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src -POSTHOOK: Input: default@src1 -#### A masked pattern was here #### -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: src1 - filterExpr: (UDFToDouble(key) = 100.0D) (type: boolean) - Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE - Filter Operator - predicate: (UDFToDouble(key) = 100.0D) (type: boolean) - Statistics: Num rows: 12 Data size: 91 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: key (type: string), value (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 12 Data size: 91 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - sort order: - Statistics: Num rows: 12 Data size: 91 Basic stats: COMPLETE Column stats: NONE - value expressions: _col0 (type: string), _col1 (type: string) - TableScan - alias: src - filterExpr: (UDFToDouble(key) = 100.0D) (type: boolean) - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - Filter Operator - predicate: (UDFToDouble(key) = 100.0D) (type: boolean) - Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: key (type: string), value (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - sort order: - Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE - value expressions: _col0 (type: string), _col1 (type: string) - Reduce Operator Tree: - Join Operator - condition map: - Inner Join 0 to 1 - keys: - 0 - 1 - outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 3000 Data size: 57622 Basic stats: COMPLETE Column stats: NONE - Limit - Number of rows: 10 - Statistics: Num rows: 
10 Data size: 190 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - Statistics: Num rows: 10 Data size: 190 Basic stats: COMPLETE Column stats: NONE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - Stage: Stage-0 - Fetch Operator - limit: 10 - Processor Tree: - ListSink - -Warning: Shuffle Join JOIN[6][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product -PREHOOK: query: EXPLAIN -SELECT * -FROM src1 JOIN src -ON ((src1.key,src.key) IN ((100,100),(101,101),(102,102))) -LIMIT 10 -PREHOOK: type: QUERY -PREHOOK: Input: default@src -PREHOOK: Input: default@src1 -#### A masked pattern was here #### -POSTHOOK: query: EXPLAIN -SELECT * -FROM src1 JOIN src -ON ((src1.key,src.key) IN ((100,100),(101,101),(102,102))) -LIMIT 10 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src -POSTHOOK: Input: default@src1 -#### A masked pattern was here #### -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: src1 - Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: key (type: string), value (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - sort order: - Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE - value expressions: _col0 (type: string), _col1 (type: string) - TableScan - alias: src - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: key (type: string), value (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - sort order: - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - value expressions: _col0 (type: string), _col1 (type: string) - Reduce Operator Tree: - Join Operator - condition map: - Inner Join 0 to 1 - keys: - 0 - 1 - outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 12500 Data size: 240800 Basic stats: COMPLETE Column stats: NONE - Filter Operator - predicate: (((UDFToDouble(_col0) = 100.0D) and (UDFToDouble(_col2) = 100.0D)) or ((UDFToDouble(_col0) = 101.0D) and (UDFToDouble(_col2) = 101.0D)) or ((UDFToDouble(_col0) = 102.0D) and (UDFToDouble(_col2) = 102.0D))) (type: boolean) - Statistics: Num rows: 9375 Data size: 180600 Basic stats: COMPLETE Column stats: NONE - Limit - Number of rows: 10 - Statistics: Num rows: 10 Data size: 190 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - Statistics: Num rows: 10 Data size: 190 Basic stats: COMPLETE Column stats: NONE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - Stage: Stage-0 - Fetch Operator - limit: 10 - Processor Tree: - ListSink - -Warning: Shuffle Join JOIN[6][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product -PREHOOK: query: SELECT * -FROM src1 JOIN src -ON ((src1.key,src.key) IN ((100,100),(101,101),(102,102))) -LIMIT 10 -PREHOOK: type: QUERY -PREHOOK: Input: 
default@src -PREHOOK: Input: default@src1 -#### A masked pattern was here #### -POSTHOOK: query: SELECT * -FROM src1 JOIN src -ON ((src1.key,src.key) IN ((100,100),(101,101),(102,102))) -LIMIT 10 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src -POSTHOOK: Input: default@src1 -#### A masked pattern was here #### -Warning: Shuffle Join JOIN[6][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product -PREHOOK: query: EXPLAIN - SELECT * - FROM src1 JOIN src - ON ((src1.key,src.key) IN ((100,100),(101,101),(102,102))) - LIMIT 10 -PREHOOK: type: QUERY -PREHOOK: Input: default@src -PREHOOK: Input: default@src1 -#### A masked pattern was here #### -POSTHOOK: query: EXPLAIN - SELECT * - FROM src1 JOIN src - ON ((src1.key,src.key) IN ((100,100),(101,101),(102,102))) - LIMIT 10 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src -POSTHOOK: Input: default@src1 -#### A masked pattern was here #### -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: src1 - Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: key (type: string), value (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - sort order: - Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE - value expressions: _col0 (type: string), _col1 (type: string) - TableScan - alias: src - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: key (type: string), value (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - sort order: - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - value expressions: _col0 (type: string), _col1 (type: string) - Reduce Operator Tree: - Join Operator - condition map: - Inner Join 0 to 1 - keys: - 0 - 1 - outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 12500 Data size: 240800 Basic stats: COMPLETE Column stats: NONE - Filter Operator - predicate: (((UDFToDouble(_col0) = 100.0D) and (UDFToDouble(_col2) = 100.0D)) or ((UDFToDouble(_col0) = 101.0D) and (UDFToDouble(_col2) = 101.0D)) or ((UDFToDouble(_col0) = 102.0D) and (UDFToDouble(_col2) = 102.0D))) (type: boolean) - Statistics: Num rows: 9375 Data size: 180600 Basic stats: COMPLETE Column stats: NONE - Limit - Number of rows: 10 - Statistics: Num rows: 10 Data size: 190 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - Statistics: Num rows: 10 Data size: 190 Basic stats: COMPLETE Column stats: NONE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - Stage: Stage-0 - Fetch Operator - limit: 10 - Processor Tree: - ListSink - diff --git ql/src/test/results/clientpositive/llap/bucket_map_join_tez1.q.out ql/src/test/results/clientpositive/llap/bucket_map_join_tez1.q.out deleted file mode 100644 index 2416384cbf..0000000000 --- ql/src/test/results/clientpositive/llap/bucket_map_join_tez1.q.out +++ /dev/null @@ -1,7066 +0,0 @@ -PREHOOK: query: CREATE TABLE srcbucket_mapjoin(key int, value string) partitioned by (ds string) 
CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@srcbucket_mapjoin -POSTHOOK: query: CREATE TABLE srcbucket_mapjoin(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@srcbucket_mapjoin -PREHOOK: query: CREATE TABLE tab_part (key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@tab_part -POSTHOOK: query: CREATE TABLE tab_part (key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@tab_part -PREHOOK: query: CREATE TABLE srcbucket_mapjoin_part (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@srcbucket_mapjoin_part -POSTHOOK: query: CREATE TABLE srcbucket_mapjoin_part (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@srcbucket_mapjoin_part -PREHOOK: query: load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08') -PREHOOK: type: LOAD -#### A masked pattern was here #### -PREHOOK: Output: default@srcbucket_mapjoin -POSTHOOK: query: load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08') -POSTHOOK: type: LOAD -#### A masked pattern was here #### -POSTHOOK: Output: default@srcbucket_mapjoin -POSTHOOK: Output: default@srcbucket_mapjoin@ds=2008-04-08 -PREHOOK: query: load data local inpath '../../data/files/bmj1/000001_0' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08') -PREHOOK: type: LOAD -#### A masked pattern was here #### -PREHOOK: Output: default@srcbucket_mapjoin@ds=2008-04-08 -POSTHOOK: query: load data local inpath '../../data/files/bmj1/000001_0' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08') -POSTHOOK: type: LOAD -#### A masked pattern was here #### -POSTHOOK: Output: default@srcbucket_mapjoin@ds=2008-04-08 -PREHOOK: query: load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08') -PREHOOK: type: LOAD -#### A masked pattern was here #### -PREHOOK: Output: default@srcbucket_mapjoin_part -POSTHOOK: query: load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08') -POSTHOOK: type: LOAD -#### A masked pattern was here #### -POSTHOOK: Output: default@srcbucket_mapjoin_part -POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08 -PREHOOK: query: load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08') -PREHOOK: type: LOAD -#### A masked pattern was here #### -PREHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08 -POSTHOOK: query: load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08') -POSTHOOK: type: LOAD -#### A masked pattern was here #### -POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08 -PREHOOK: query: load data local 
inpath '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08') -PREHOOK: type: LOAD -#### A masked pattern was here #### -PREHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08 -POSTHOOK: query: load data local inpath '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08') -POSTHOOK: type: LOAD -#### A masked pattern was here #### -POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08 -PREHOOK: query: load data local inpath '../../data/files/bmj/000003_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08') -PREHOOK: type: LOAD -#### A masked pattern was here #### -PREHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08 -POSTHOOK: query: load data local inpath '../../data/files/bmj/000003_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08') -POSTHOOK: type: LOAD -#### A masked pattern was here #### -POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08 -PREHOOK: query: insert overwrite table tab_part partition (ds='2008-04-08') -select key,value from srcbucket_mapjoin_part -PREHOOK: type: QUERY -PREHOOK: Input: default@srcbucket_mapjoin_part -PREHOOK: Input: default@srcbucket_mapjoin_part@ds=2008-04-08 -PREHOOK: Output: default@tab_part@ds=2008-04-08 -POSTHOOK: query: insert overwrite table tab_part partition (ds='2008-04-08') -select key,value from srcbucket_mapjoin_part -POSTHOOK: type: QUERY -POSTHOOK: Input: default@srcbucket_mapjoin_part -POSTHOOK: Input: default@srcbucket_mapjoin_part@ds=2008-04-08 -POSTHOOK: Output: default@tab_part@ds=2008-04-08 -POSTHOOK: Lineage: tab_part PARTITION(ds=2008-04-08).key SIMPLE [(srcbucket_mapjoin_part)srcbucket_mapjoin_part.FieldSchema(name:key, type:int, comment:null), ] -POSTHOOK: Lineage: tab_part PARTITION(ds=2008-04-08).value SIMPLE [(srcbucket_mapjoin_part)srcbucket_mapjoin_part.FieldSchema(name:value, type:string, comment:null), ] -PREHOOK: query: CREATE TABLE tab(key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@tab -POSTHOOK: query: CREATE TABLE tab(key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@tab -PREHOOK: query: insert overwrite table tab partition (ds='2008-04-08') -select key,value from srcbucket_mapjoin -PREHOOK: type: QUERY -PREHOOK: Input: default@srcbucket_mapjoin -PREHOOK: Input: default@srcbucket_mapjoin@ds=2008-04-08 -PREHOOK: Output: default@tab@ds=2008-04-08 -POSTHOOK: query: insert overwrite table tab partition (ds='2008-04-08') -select key,value from srcbucket_mapjoin -POSTHOOK: type: QUERY -POSTHOOK: Input: default@srcbucket_mapjoin -POSTHOOK: Input: default@srcbucket_mapjoin@ds=2008-04-08 -POSTHOOK: Output: default@tab@ds=2008-04-08 -POSTHOOK: Lineage: tab PARTITION(ds=2008-04-08).key SIMPLE [(srcbucket_mapjoin)srcbucket_mapjoin.FieldSchema(name:key, type:int, comment:null), ] -POSTHOOK: Lineage: tab PARTITION(ds=2008-04-08).value SIMPLE [(srcbucket_mapjoin)srcbucket_mapjoin.FieldSchema(name:value, type:string, comment:null), ] -PREHOOK: query: analyze table srcbucket_mapjoin compute statistics for columns -PREHOOK: type: ANALYZE_TABLE -PREHOOK: Input: default@srcbucket_mapjoin -PREHOOK: Input: default@srcbucket_mapjoin@ds=2008-04-08 -PREHOOK: Output: default@srcbucket_mapjoin -PREHOOK: Output: 
default@srcbucket_mapjoin@ds=2008-04-08 -#### A masked pattern was here #### -POSTHOOK: query: analyze table srcbucket_mapjoin compute statistics for columns -POSTHOOK: type: ANALYZE_TABLE -POSTHOOK: Input: default@srcbucket_mapjoin -POSTHOOK: Input: default@srcbucket_mapjoin@ds=2008-04-08 -POSTHOOK: Output: default@srcbucket_mapjoin -POSTHOOK: Output: default@srcbucket_mapjoin@ds=2008-04-08 -#### A masked pattern was here #### -PREHOOK: query: analyze table srcbucket_mapjoin_part compute statistics for columns -PREHOOK: type: ANALYZE_TABLE -PREHOOK: Input: default@srcbucket_mapjoin_part -PREHOOK: Input: default@srcbucket_mapjoin_part@ds=2008-04-08 -PREHOOK: Output: default@srcbucket_mapjoin_part -PREHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08 -#### A masked pattern was here #### -POSTHOOK: query: analyze table srcbucket_mapjoin_part compute statistics for columns -POSTHOOK: type: ANALYZE_TABLE -POSTHOOK: Input: default@srcbucket_mapjoin_part -POSTHOOK: Input: default@srcbucket_mapjoin_part@ds=2008-04-08 -POSTHOOK: Output: default@srcbucket_mapjoin_part -POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08 -#### A masked pattern was here #### -PREHOOK: query: analyze table tab compute statistics for columns -PREHOOK: type: ANALYZE_TABLE -PREHOOK: Input: default@tab -PREHOOK: Input: default@tab@ds=2008-04-08 -PREHOOK: Output: default@tab -PREHOOK: Output: default@tab@ds=2008-04-08 -#### A masked pattern was here #### -POSTHOOK: query: analyze table tab compute statistics for columns -POSTHOOK: type: ANALYZE_TABLE -POSTHOOK: Input: default@tab -POSTHOOK: Input: default@tab@ds=2008-04-08 -POSTHOOK: Output: default@tab -POSTHOOK: Output: default@tab@ds=2008-04-08 -#### A masked pattern was here #### -PREHOOK: query: analyze table tab_part compute statistics for columns -PREHOOK: type: ANALYZE_TABLE -PREHOOK: Input: default@tab_part -PREHOOK: Input: default@tab_part@ds=2008-04-08 -PREHOOK: Output: default@tab_part -PREHOOK: Output: default@tab_part@ds=2008-04-08 -#### A masked pattern was here #### -POSTHOOK: query: analyze table tab_part compute statistics for columns -POSTHOOK: type: ANALYZE_TABLE -POSTHOOK: Input: default@tab_part -POSTHOOK: Input: default@tab_part@ds=2008-04-08 -POSTHOOK: Output: default@tab_part -POSTHOOK: Output: default@tab_part@ds=2008-04-08 -#### A masked pattern was here #### -PREHOOK: query: explain -select a.key, a.value, b.value -from tab a join tab_part b on a.key = b.key order by a.key, a.value, b.value -PREHOOK: type: QUERY -POSTHOOK: query: explain -select a.key, a.value, b.value -from tab a join tab_part b on a.key = b.key order by a.key, a.value, b.value -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-1 - Tez -#### A masked pattern was here #### - Edges: - Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE) - Reducer 3 <- Reducer 2 (SIMPLE_EDGE) -#### A masked pattern was here #### - Vertices: - Map 1 - Map Operator Tree: - TableScan - alias: a - Statistics: Num rows: 242 Data size: 22990 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator - predicate: key is not null (type: boolean) - Statistics: Num rows: 242 Data size: 22990 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: key (type: int), value (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 242 Data size: 22990 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 (type: int) - sort 
order: + - Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 242 Data size: 22990 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col1 (type: string) - Execution mode: vectorized, llap - LLAP IO: no inputs - Map 4 - Map Operator Tree: - TableScan - alias: b - Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator - predicate: key is not null (type: boolean) - Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: key (type: int), value (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 (type: int) - sort order: + - Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col1 (type: string) - Execution mode: vectorized, llap - LLAP IO: no inputs - Reducer 2 - Execution mode: llap - Reduce Operator Tree: - Merge Join Operator - condition map: - Inner Join 0 to 1 - keys: - 0 _col0 (type: int) - 1 _col0 (type: int) - outputColumnNames: _col0, _col1, _col3 - Statistics: Num rows: 408 Data size: 75888 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col3 (type: string) - outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 408 Data size: 75888 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string) - sort order: +++ - Statistics: Num rows: 408 Data size: 75888 Basic stats: COMPLETE Column stats: COMPLETE - Reducer 3 - Execution mode: vectorized, llap - Reduce Operator Tree: - Select Operator - expressions: KEY.reducesinkkey0 (type: int), KEY.reducesinkkey1 (type: string), KEY.reducesinkkey2 (type: string) - outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 408 Data size: 75888 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: Num rows: 408 Data size: 75888 Basic stats: COMPLETE Column stats: COMPLETE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - ListSink - -PREHOOK: query: select a.key, a.value, b.value -from tab a join tab_part b on a.key = b.key order by a.key, a.value, b.value -PREHOOK: type: QUERY -PREHOOK: Input: default@tab -PREHOOK: Input: default@tab@ds=2008-04-08 -PREHOOK: Input: default@tab_part -PREHOOK: Input: default@tab_part@ds=2008-04-08 -#### A masked pattern was here #### -POSTHOOK: query: select a.key, a.value, b.value -from tab a join tab_part b on a.key = b.key order by a.key, a.value, b.value -POSTHOOK: type: QUERY -POSTHOOK: Input: default@tab -POSTHOOK: Input: default@tab@ds=2008-04-08 -POSTHOOK: Input: default@tab_part -POSTHOOK: Input: default@tab_part@ds=2008-04-08 -#### A masked pattern was here #### -0 val_0 val_0 -0 val_0 val_0 -0 val_0 val_0 -0 val_0 val_0 -0 val_0 val_0 -0 val_0 val_0 -0 val_0 val_0 -0 val_0 val_0 -0 val_0 val_0 -2 val_2 val_2 -4 val_4 val_4 -8 val_8 val_8 -11 val_11 val_11 -15 val_15 val_15 -15 val_15 val_15 -15 val_15 val_15 -15 val_15 val_15 -17 val_17 val_17 -19 val_19 val_19 -20 val_20 
val_20 -24 val_24 val_24 -24 val_24 val_24 -24 val_24 val_24 -24 val_24 val_24 -26 val_26 val_26 -26 val_26 val_26 -26 val_26 val_26 -26 val_26 val_26 -28 val_28 val_28 -33 val_33 val_33 -35 val_35 val_35 -35 val_35 val_35 -35 val_35 val_35 -35 val_35 val_35 -35 val_35 val_35 -35 val_35 val_35 -35 val_35 val_35 -35 val_35 val_35 -35 val_35 val_35 -37 val_37 val_37 -37 val_37 val_37 -37 val_37 val_37 -37 val_37 val_37 -42 val_42 val_42 -42 val_42 val_42 -42 val_42 val_42 -42 val_42 val_42 -44 val_44 val_44 -51 val_51 val_51 -51 val_51 val_51 -51 val_51 val_51 -51 val_51 val_51 -53 val_53 val_53 -57 val_57 val_57 -64 val_64 val_64 -66 val_66 val_66 -77 val_77 val_77 -80 val_80 val_80 -82 val_82 val_82 -84 val_84 val_84 -84 val_84 val_84 -84 val_84 val_84 -84 val_84 val_84 -86 val_86 val_86 -95 val_95 val_95 -95 val_95 val_95 -95 val_95 val_95 -95 val_95 val_95 -97 val_97 val_97 -97 val_97 val_97 -97 val_97 val_97 -97 val_97 val_97 -103 val_103 val_103 -103 val_103 val_103 -103 val_103 val_103 -103 val_103 val_103 -105 val_105 val_105 -114 val_114 val_114 -116 val_116 val_116 -118 val_118 val_118 -118 val_118 val_118 -118 val_118 val_118 -118 val_118 val_118 -125 val_125 val_125 -125 val_125 val_125 -125 val_125 val_125 -125 val_125 val_125 -129 val_129 val_129 -129 val_129 val_129 -129 val_129 val_129 -129 val_129 val_129 -134 val_134 val_134 -134 val_134 val_134 -134 val_134 val_134 -134 val_134 val_134 -136 val_136 val_136 -138 val_138 val_138 -138 val_138 val_138 -138 val_138 val_138 -138 val_138 val_138 -138 val_138 val_138 -138 val_138 val_138 -138 val_138 val_138 -138 val_138 val_138 -138 val_138 val_138 -138 val_138 val_138 -138 val_138 val_138 -138 val_138 val_138 -138 val_138 val_138 -138 val_138 val_138 -138 val_138 val_138 -138 val_138 val_138 -143 val_143 val_143 -145 val_145 val_145 -149 val_149 val_149 -149 val_149 val_149 -149 val_149 val_149 -149 val_149 val_149 -150 val_150 val_150 -152 val_152 val_152 -152 val_152 val_152 -152 val_152 val_152 -152 val_152 val_152 -156 val_156 val_156 -158 val_158 val_158 -163 val_163 val_163 -165 val_165 val_165 -165 val_165 val_165 -165 val_165 val_165 -165 val_165 val_165 -167 val_167 val_167 -167 val_167 val_167 -167 val_167 val_167 -167 val_167 val_167 -167 val_167 val_167 -167 val_167 val_167 -167 val_167 val_167 -167 val_167 val_167 -167 val_167 val_167 -169 val_169 val_169 -169 val_169 val_169 -169 val_169 val_169 -169 val_169 val_169 -169 val_169 val_169 -169 val_169 val_169 -169 val_169 val_169 -169 val_169 val_169 -169 val_169 val_169 -169 val_169 val_169 -169 val_169 val_169 -169 val_169 val_169 -169 val_169 val_169 -169 val_169 val_169 -169 val_169 val_169 -169 val_169 val_169 -170 val_170 val_170 -172 val_172 val_172 -172 val_172 val_172 -172 val_172 val_172 -172 val_172 val_172 -174 val_174 val_174 -174 val_174 val_174 -174 val_174 val_174 -174 val_174 val_174 -176 val_176 val_176 -176 val_176 val_176 -176 val_176 val_176 -176 val_176 val_176 -178 val_178 val_178 -181 val_181 val_181 -183 val_183 val_183 -187 val_187 val_187 -187 val_187 val_187 -187 val_187 val_187 -187 val_187 val_187 -187 val_187 val_187 -187 val_187 val_187 -187 val_187 val_187 -187 val_187 val_187 -187 val_187 val_187 -189 val_189 val_189 -190 val_190 val_190 -192 val_192 val_192 -194 val_194 val_194 -196 val_196 val_196 -200 val_200 val_200 -200 val_200 val_200 -200 val_200 val_200 -200 val_200 val_200 -202 val_202 val_202 -208 val_208 val_208 -208 val_208 val_208 -208 val_208 val_208 -208 val_208 val_208 -208 val_208 val_208 -208 val_208 val_208 -208 
val_208 val_208 -208 val_208 val_208 -208 val_208 val_208 -213 val_213 val_213 -213 val_213 val_213 -213 val_213 val_213 -213 val_213 val_213 -217 val_217 val_217 -217 val_217 val_217 -217 val_217 val_217 -217 val_217 val_217 -219 val_219 val_219 -219 val_219 val_219 -219 val_219 val_219 -219 val_219 val_219 -222 val_222 val_222 -224 val_224 val_224 -224 val_224 val_224 -224 val_224 val_224 -224 val_224 val_224 -226 val_226 val_226 -228 val_228 val_228 -233 val_233 val_233 -233 val_233 val_233 -233 val_233 val_233 -233 val_233 val_233 -235 val_235 val_235 -237 val_237 val_237 -237 val_237 val_237 -237 val_237 val_237 -237 val_237 val_237 -239 val_239 val_239 -239 val_239 val_239 -239 val_239 val_239 -239 val_239 val_239 -242 val_242 val_242 -242 val_242 val_242 -242 val_242 val_242 -242 val_242 val_242 -244 val_244 val_244 -248 val_248 val_248 -255 val_255 val_255 -255 val_255 val_255 -255 val_255 val_255 -255 val_255 val_255 -257 val_257 val_257 -260 val_260 val_260 -262 val_262 val_262 -266 val_266 val_266 -273 val_273 val_273 -273 val_273 val_273 -273 val_273 val_273 -273 val_273 val_273 -273 val_273 val_273 -273 val_273 val_273 -273 val_273 val_273 -273 val_273 val_273 -273 val_273 val_273 -275 val_275 val_275 -277 val_277 val_277 -277 val_277 val_277 -277 val_277 val_277 -277 val_277 val_277 -277 val_277 val_277 -277 val_277 val_277 -277 val_277 val_277 -277 val_277 val_277 -277 val_277 val_277 -277 val_277 val_277 -277 val_277 val_277 -277 val_277 val_277 -277 val_277 val_277 -277 val_277 val_277 -277 val_277 val_277 -277 val_277 val_277 -280 val_280 val_280 -280 val_280 val_280 -280 val_280 val_280 -280 val_280 val_280 -282 val_282 val_282 -282 val_282 val_282 -282 val_282 val_282 -282 val_282 val_282 -284 val_284 val_284 -286 val_286 val_286 -288 val_288 val_288 -288 val_288 val_288 -288 val_288 val_288 -288 val_288 val_288 -291 val_291 val_291 -305 val_305 val_305 -307 val_307 val_307 -307 val_307 val_307 -307 val_307 val_307 -307 val_307 val_307 -309 val_309 val_309 -309 val_309 val_309 -309 val_309 val_309 -309 val_309 val_309 -310 val_310 val_310 -316 val_316 val_316 -316 val_316 val_316 -316 val_316 val_316 -316 val_316 val_316 -316 val_316 val_316 -316 val_316 val_316 -316 val_316 val_316 -316 val_316 val_316 -316 val_316 val_316 -318 val_318 val_318 -318 val_318 val_318 -318 val_318 val_318 -318 val_318 val_318 -318 val_318 val_318 -318 val_318 val_318 -318 val_318 val_318 -318 val_318 val_318 -318 val_318 val_318 -321 val_321 val_321 -321 val_321 val_321 -321 val_321 val_321 -321 val_321 val_321 -323 val_323 val_323 -325 val_325 val_325 -325 val_325 val_325 -325 val_325 val_325 -325 val_325 val_325 -327 val_327 val_327 -327 val_327 val_327 -327 val_327 val_327 -327 val_327 val_327 -327 val_327 val_327 -327 val_327 val_327 -327 val_327 val_327 -327 val_327 val_327 -327 val_327 val_327 -332 val_332 val_332 -336 val_336 val_336 -338 val_338 val_338 -341 val_341 val_341 -345 val_345 val_345 -356 val_356 val_356 -365 val_365 val_365 -367 val_367 val_367 -367 val_367 val_367 -367 val_367 val_367 -367 val_367 val_367 -369 val_369 val_369 -369 val_369 val_369 -369 val_369 val_369 -369 val_369 val_369 -369 val_369 val_369 -369 val_369 val_369 -369 val_369 val_369 -369 val_369 val_369 -369 val_369 val_369 -374 val_374 val_374 -378 val_378 val_378 -389 val_389 val_389 -392 val_392 val_392 -394 val_394 val_394 -396 val_396 val_396 -396 val_396 val_396 -396 val_396 val_396 -396 val_396 val_396 -396 val_396 val_396 -396 val_396 val_396 -396 val_396 val_396 -396 val_396 val_396 -396 
val_396 val_396 -400 val_400 val_400 -402 val_402 val_402 -404 val_404 val_404 -404 val_404 val_404 -404 val_404 val_404 -404 val_404 val_404 -406 val_406 val_406 -406 val_406 val_406 -406 val_406 val_406 -406 val_406 val_406 -406 val_406 val_406 -406 val_406 val_406 -406 val_406 val_406 -406 val_406 val_406 -406 val_406 val_406 -406 val_406 val_406 -406 val_406 val_406 -406 val_406 val_406 -406 val_406 val_406 -406 val_406 val_406 -406 val_406 val_406 -406 val_406 val_406 -411 val_411 val_411 -413 val_413 val_413 -413 val_413 val_413 -413 val_413 val_413 -413 val_413 val_413 -417 val_417 val_417 -417 val_417 val_417 -417 val_417 val_417 -417 val_417 val_417 -417 val_417 val_417 -417 val_417 val_417 -417 val_417 val_417 -417 val_417 val_417 -417 val_417 val_417 -419 val_419 val_419 -424 val_424 val_424 -424 val_424 val_424 -424 val_424 val_424 -424 val_424 val_424 -431 val_431 val_431 -431 val_431 val_431 -431 val_431 val_431 -431 val_431 val_431 -431 val_431 val_431 -431 val_431 val_431 -431 val_431 val_431 -431 val_431 val_431 -431 val_431 val_431 -435 val_435 val_435 -437 val_437 val_437 -439 val_439 val_439 -439 val_439 val_439 -439 val_439 val_439 -439 val_439 val_439 -444 val_444 val_444 -446 val_446 val_446 -448 val_448 val_448 -453 val_453 val_453 -455 val_455 val_455 -457 val_457 val_457 -459 val_459 val_459 -459 val_459 val_459 -459 val_459 val_459 -459 val_459 val_459 -460 val_460 val_460 -462 val_462 val_462 -462 val_462 val_462 -462 val_462 val_462 -462 val_462 val_462 -466 val_466 val_466 -466 val_466 val_466 -466 val_466 val_466 -466 val_466 val_466 -466 val_466 val_466 -466 val_466 val_466 -466 val_466 val_466 -466 val_466 val_466 -466 val_466 val_466 -468 val_468 val_468 -468 val_468 val_468 -468 val_468 val_468 -468 val_468 val_468 -468 val_468 val_468 -468 val_468 val_468 -468 val_468 val_468 -468 val_468 val_468 -468 val_468 val_468 -468 val_468 val_468 -468 val_468 val_468 -468 val_468 val_468 -468 val_468 val_468 -468 val_468 val_468 -468 val_468 val_468 -468 val_468 val_468 -475 val_475 val_475 -477 val_477 val_477 -479 val_479 val_479 -480 val_480 val_480 -480 val_480 val_480 -480 val_480 val_480 -480 val_480 val_480 -480 val_480 val_480 -480 val_480 val_480 -480 val_480 val_480 -480 val_480 val_480 -480 val_480 val_480 -482 val_482 val_482 -484 val_484 val_484 -491 val_491 val_491 -493 val_493 val_493 -495 val_495 val_495 -497 val_497 val_497 -PREHOOK: query: explain -select a.key, a.value, b.value -from tab a join tab_part b on a.key = b.key order by a.key, a.value, b.value -PREHOOK: type: QUERY -POSTHOOK: query: explain -select a.key, a.value, b.value -from tab a join tab_part b on a.key = b.key order by a.key, a.value, b.value -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-1 - Tez -#### A masked pattern was here #### - Edges: - Map 2 <- Map 1 (CUSTOM_EDGE) - Reducer 3 <- Map 2 (SIMPLE_EDGE) -#### A masked pattern was here #### - Vertices: - Map 1 - Map Operator Tree: - TableScan - alias: a - Statistics: Num rows: 242 Data size: 22990 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator - predicate: key is not null (type: boolean) - Statistics: Num rows: 242 Data size: 22990 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: key (type: int), value (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 242 Data size: 22990 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 
(type: int) - sort order: + - Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 242 Data size: 22990 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col1 (type: string) - Execution mode: vectorized, llap - LLAP IO: no inputs - Map 2 - Map Operator Tree: - TableScan - alias: b - Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator - predicate: key is not null (type: boolean) - Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: key (type: int), value (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE - Map Join Operator - condition map: - Inner Join 0 to 1 - keys: - 0 _col0 (type: int) - 1 _col0 (type: int) - outputColumnNames: _col0, _col1, _col3 - input vertices: - 0 Map 1 - Statistics: Num rows: 408 Data size: 75888 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col3 (type: string) - outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 408 Data size: 75888 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string) - sort order: +++ - Statistics: Num rows: 408 Data size: 75888 Basic stats: COMPLETE Column stats: COMPLETE - Execution mode: vectorized, llap - LLAP IO: no inputs - Reducer 3 - Execution mode: vectorized, llap - Reduce Operator Tree: - Select Operator - expressions: KEY.reducesinkkey0 (type: int), KEY.reducesinkkey1 (type: string), KEY.reducesinkkey2 (type: string) - outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 408 Data size: 75888 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: Num rows: 408 Data size: 75888 Basic stats: COMPLETE Column stats: COMPLETE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - ListSink - -PREHOOK: query: select a.key, a.value, b.value -from tab a join tab_part b on a.key = b.key order by a.key, a.value, b.value -PREHOOK: type: QUERY -PREHOOK: Input: default@tab -PREHOOK: Input: default@tab@ds=2008-04-08 -PREHOOK: Input: default@tab_part -PREHOOK: Input: default@tab_part@ds=2008-04-08 -#### A masked pattern was here #### -POSTHOOK: query: select a.key, a.value, b.value -from tab a join tab_part b on a.key = b.key order by a.key, a.value, b.value -POSTHOOK: type: QUERY -POSTHOOK: Input: default@tab -POSTHOOK: Input: default@tab@ds=2008-04-08 -POSTHOOK: Input: default@tab_part -POSTHOOK: Input: default@tab_part@ds=2008-04-08 -#### A masked pattern was here #### -0 val_0 val_0 -0 val_0 val_0 -0 val_0 val_0 -0 val_0 val_0 -0 val_0 val_0 -0 val_0 val_0 -0 val_0 val_0 -0 val_0 val_0 -0 val_0 val_0 -2 val_2 val_2 -4 val_4 val_4 -8 val_8 val_8 -11 val_11 val_11 -15 val_15 val_15 -15 val_15 val_15 -15 val_15 val_15 -15 val_15 val_15 -17 val_17 val_17 -19 val_19 val_19 -20 val_20 val_20 -24 val_24 val_24 -24 val_24 val_24 -24 val_24 val_24 -24 val_24 val_24 -26 val_26 val_26 -26 val_26 val_26 -26 val_26 val_26 -26 val_26 val_26 -28 val_28 val_28 -33 val_33 val_33 -35 val_35 val_35 -35 val_35 val_35 -35 val_35 val_35 -35 val_35 val_35 -35 val_35 val_35 
-35 val_35 val_35 -35 val_35 val_35 -35 val_35 val_35 -35 val_35 val_35 -37 val_37 val_37 -37 val_37 val_37 -37 val_37 val_37 -37 val_37 val_37 -42 val_42 val_42 -42 val_42 val_42 -42 val_42 val_42 -42 val_42 val_42 -44 val_44 val_44 -51 val_51 val_51 -51 val_51 val_51 -51 val_51 val_51 -51 val_51 val_51 -53 val_53 val_53 -57 val_57 val_57 -64 val_64 val_64 -66 val_66 val_66 -77 val_77 val_77 -80 val_80 val_80 -82 val_82 val_82 -84 val_84 val_84 -84 val_84 val_84 -84 val_84 val_84 -84 val_84 val_84 -86 val_86 val_86 -95 val_95 val_95 -95 val_95 val_95 -95 val_95 val_95 -95 val_95 val_95 -97 val_97 val_97 -97 val_97 val_97 -97 val_97 val_97 -97 val_97 val_97 -103 val_103 val_103 -103 val_103 val_103 -103 val_103 val_103 -103 val_103 val_103 -105 val_105 val_105 -114 val_114 val_114 -116 val_116 val_116 -118 val_118 val_118 -118 val_118 val_118 -118 val_118 val_118 -118 val_118 val_118 -125 val_125 val_125 -125 val_125 val_125 -125 val_125 val_125 -125 val_125 val_125 -129 val_129 val_129 -129 val_129 val_129 -129 val_129 val_129 -129 val_129 val_129 -134 val_134 val_134 -134 val_134 val_134 -134 val_134 val_134 -134 val_134 val_134 -136 val_136 val_136 -138 val_138 val_138 -138 val_138 val_138 -138 val_138 val_138 -138 val_138 val_138 -138 val_138 val_138 -138 val_138 val_138 -138 val_138 val_138 -138 val_138 val_138 -138 val_138 val_138 -138 val_138 val_138 -138 val_138 val_138 -138 val_138 val_138 -138 val_138 val_138 -138 val_138 val_138 -138 val_138 val_138 -138 val_138 val_138 -143 val_143 val_143 -145 val_145 val_145 -149 val_149 val_149 -149 val_149 val_149 -149 val_149 val_149 -149 val_149 val_149 -150 val_150 val_150 -152 val_152 val_152 -152 val_152 val_152 -152 val_152 val_152 -152 val_152 val_152 -156 val_156 val_156 -158 val_158 val_158 -163 val_163 val_163 -165 val_165 val_165 -165 val_165 val_165 -165 val_165 val_165 -165 val_165 val_165 -167 val_167 val_167 -167 val_167 val_167 -167 val_167 val_167 -167 val_167 val_167 -167 val_167 val_167 -167 val_167 val_167 -167 val_167 val_167 -167 val_167 val_167 -167 val_167 val_167 -169 val_169 val_169 -169 val_169 val_169 -169 val_169 val_169 -169 val_169 val_169 -169 val_169 val_169 -169 val_169 val_169 -169 val_169 val_169 -169 val_169 val_169 -169 val_169 val_169 -169 val_169 val_169 -169 val_169 val_169 -169 val_169 val_169 -169 val_169 val_169 -169 val_169 val_169 -169 val_169 val_169 -169 val_169 val_169 -170 val_170 val_170 -172 val_172 val_172 -172 val_172 val_172 -172 val_172 val_172 -172 val_172 val_172 -174 val_174 val_174 -174 val_174 val_174 -174 val_174 val_174 -174 val_174 val_174 -176 val_176 val_176 -176 val_176 val_176 -176 val_176 val_176 -176 val_176 val_176 -178 val_178 val_178 -181 val_181 val_181 -183 val_183 val_183 -187 val_187 val_187 -187 val_187 val_187 -187 val_187 val_187 -187 val_187 val_187 -187 val_187 val_187 -187 val_187 val_187 -187 val_187 val_187 -187 val_187 val_187 -187 val_187 val_187 -189 val_189 val_189 -190 val_190 val_190 -192 val_192 val_192 -194 val_194 val_194 -196 val_196 val_196 -200 val_200 val_200 -200 val_200 val_200 -200 val_200 val_200 -200 val_200 val_200 -202 val_202 val_202 -208 val_208 val_208 -208 val_208 val_208 -208 val_208 val_208 -208 val_208 val_208 -208 val_208 val_208 -208 val_208 val_208 -208 val_208 val_208 -208 val_208 val_208 -208 val_208 val_208 -213 val_213 val_213 -213 val_213 val_213 -213 val_213 val_213 -213 val_213 val_213 -217 val_217 val_217 -217 val_217 val_217 -217 val_217 val_217 -217 val_217 val_217 -219 val_219 val_219 -219 val_219 val_219 -219 
val_219 val_219 -219 val_219 val_219 -222 val_222 val_222 -224 val_224 val_224 -224 val_224 val_224 -224 val_224 val_224 -224 val_224 val_224 -226 val_226 val_226 -228 val_228 val_228 -233 val_233 val_233 -233 val_233 val_233 -233 val_233 val_233 -233 val_233 val_233 -235 val_235 val_235 -237 val_237 val_237 -237 val_237 val_237 -237 val_237 val_237 -237 val_237 val_237 -239 val_239 val_239 -239 val_239 val_239 -239 val_239 val_239 -239 val_239 val_239 -242 val_242 val_242 -242 val_242 val_242 -242 val_242 val_242 -242 val_242 val_242 -244 val_244 val_244 -248 val_248 val_248 -255 val_255 val_255 -255 val_255 val_255 -255 val_255 val_255 -255 val_255 val_255 -257 val_257 val_257 -260 val_260 val_260 -262 val_262 val_262 -266 val_266 val_266 -273 val_273 val_273 -273 val_273 val_273 -273 val_273 val_273 -273 val_273 val_273 -273 val_273 val_273 -273 val_273 val_273 -273 val_273 val_273 -273 val_273 val_273 -273 val_273 val_273 -275 val_275 val_275 -277 val_277 val_277 -277 val_277 val_277 -277 val_277 val_277 -277 val_277 val_277 -277 val_277 val_277 -277 val_277 val_277 -277 val_277 val_277 -277 val_277 val_277 -277 val_277 val_277 -277 val_277 val_277 -277 val_277 val_277 -277 val_277 val_277 -277 val_277 val_277 -277 val_277 val_277 -277 val_277 val_277 -277 val_277 val_277 -280 val_280 val_280 -280 val_280 val_280 -280 val_280 val_280 -280 val_280 val_280 -282 val_282 val_282 -282 val_282 val_282 -282 val_282 val_282 -282 val_282 val_282 -284 val_284 val_284 -286 val_286 val_286 -288 val_288 val_288 -288 val_288 val_288 -288 val_288 val_288 -288 val_288 val_288 -291 val_291 val_291 -305 val_305 val_305 -307 val_307 val_307 -307 val_307 val_307 -307 val_307 val_307 -307 val_307 val_307 -309 val_309 val_309 -309 val_309 val_309 -309 val_309 val_309 -309 val_309 val_309 -310 val_310 val_310 -316 val_316 val_316 -316 val_316 val_316 -316 val_316 val_316 -316 val_316 val_316 -316 val_316 val_316 -316 val_316 val_316 -316 val_316 val_316 -316 val_316 val_316 -316 val_316 val_316 -318 val_318 val_318 -318 val_318 val_318 -318 val_318 val_318 -318 val_318 val_318 -318 val_318 val_318 -318 val_318 val_318 -318 val_318 val_318 -318 val_318 val_318 -318 val_318 val_318 -321 val_321 val_321 -321 val_321 val_321 -321 val_321 val_321 -321 val_321 val_321 -323 val_323 val_323 -325 val_325 val_325 -325 val_325 val_325 -325 val_325 val_325 -325 val_325 val_325 -327 val_327 val_327 -327 val_327 val_327 -327 val_327 val_327 -327 val_327 val_327 -327 val_327 val_327 -327 val_327 val_327 -327 val_327 val_327 -327 val_327 val_327 -327 val_327 val_327 -332 val_332 val_332 -336 val_336 val_336 -338 val_338 val_338 -341 val_341 val_341 -345 val_345 val_345 -356 val_356 val_356 -365 val_365 val_365 -367 val_367 val_367 -367 val_367 val_367 -367 val_367 val_367 -367 val_367 val_367 -369 val_369 val_369 -369 val_369 val_369 -369 val_369 val_369 -369 val_369 val_369 -369 val_369 val_369 -369 val_369 val_369 -369 val_369 val_369 -369 val_369 val_369 -369 val_369 val_369 -374 val_374 val_374 -378 val_378 val_378 -389 val_389 val_389 -392 val_392 val_392 -394 val_394 val_394 -396 val_396 val_396 -396 val_396 val_396 -396 val_396 val_396 -396 val_396 val_396 -396 val_396 val_396 -396 val_396 val_396 -396 val_396 val_396 -396 val_396 val_396 -396 val_396 val_396 -400 val_400 val_400 -402 val_402 val_402 -404 val_404 val_404 -404 val_404 val_404 -404 val_404 val_404 -404 val_404 val_404 -406 val_406 val_406 -406 val_406 val_406 -406 val_406 val_406 -406 val_406 val_406 -406 val_406 val_406 -406 val_406 val_406 -406 
val_406 val_406 -406 val_406 val_406 -406 val_406 val_406 -406 val_406 val_406 -406 val_406 val_406 -406 val_406 val_406 -406 val_406 val_406 -406 val_406 val_406 -406 val_406 val_406 -406 val_406 val_406 -411 val_411 val_411 -413 val_413 val_413 -413 val_413 val_413 -413 val_413 val_413 -413 val_413 val_413 -417 val_417 val_417 -417 val_417 val_417 -417 val_417 val_417 -417 val_417 val_417 -417 val_417 val_417 -417 val_417 val_417 -417 val_417 val_417 -417 val_417 val_417 -417 val_417 val_417 -419 val_419 val_419 -424 val_424 val_424 -424 val_424 val_424 -424 val_424 val_424 -424 val_424 val_424 -431 val_431 val_431 -431 val_431 val_431 -431 val_431 val_431 -431 val_431 val_431 -431 val_431 val_431 -431 val_431 val_431 -431 val_431 val_431 -431 val_431 val_431 -431 val_431 val_431 -435 val_435 val_435 -437 val_437 val_437 -439 val_439 val_439 -439 val_439 val_439 -439 val_439 val_439 -439 val_439 val_439 -444 val_444 val_444 -446 val_446 val_446 -448 val_448 val_448 -453 val_453 val_453 -455 val_455 val_455 -457 val_457 val_457 -459 val_459 val_459 -459 val_459 val_459 -459 val_459 val_459 -459 val_459 val_459 -460 val_460 val_460 -462 val_462 val_462 -462 val_462 val_462 -462 val_462 val_462 -462 val_462 val_462 -466 val_466 val_466 -466 val_466 val_466 -466 val_466 val_466 -466 val_466 val_466 -466 val_466 val_466 -466 val_466 val_466 -466 val_466 val_466 -466 val_466 val_466 -466 val_466 val_466 -468 val_468 val_468 -468 val_468 val_468 -468 val_468 val_468 -468 val_468 val_468 -468 val_468 val_468 -468 val_468 val_468 -468 val_468 val_468 -468 val_468 val_468 -468 val_468 val_468 -468 val_468 val_468 -468 val_468 val_468 -468 val_468 val_468 -468 val_468 val_468 -468 val_468 val_468 -468 val_468 val_468 -468 val_468 val_468 -475 val_475 val_475 -477 val_477 val_477 -479 val_479 val_479 -480 val_480 val_480 -480 val_480 val_480 -480 val_480 val_480 -480 val_480 val_480 -480 val_480 val_480 -480 val_480 val_480 -480 val_480 val_480 -480 val_480 val_480 -480 val_480 val_480 -482 val_482 val_482 -484 val_484 val_484 -491 val_491 val_491 -493 val_493 val_493 -495 val_495 val_495 -497 val_497 val_497 -PREHOOK: query: explain -select count(*) -from -(select distinct key from tab_part) a join tab b on a.key = b.key -PREHOOK: type: QUERY -POSTHOOK: query: explain -select count(*) -from -(select distinct key from tab_part) a join tab b on a.key = b.key -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-1 - Tez -#### A masked pattern was here #### - Edges: - Reducer 2 <- Map 1 (SIMPLE_EDGE) - Reducer 3 <- Map 5 (SIMPLE_EDGE), Reducer 2 (ONE_TO_ONE_EDGE) - Reducer 4 <- Reducer 3 (CUSTOM_SIMPLE_EDGE) -#### A masked pattern was here #### - Vertices: - Map 1 - Map Operator Tree: - TableScan - alias: tab_part - Statistics: Num rows: 500 Data size: 2000 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator - predicate: key is not null (type: boolean) - Statistics: Num rows: 500 Data size: 2000 Basic stats: COMPLETE Column stats: COMPLETE - Group By Operator - keys: key (type: int) - mode: hash - outputColumnNames: _col0 - Statistics: Num rows: 250 Data size: 1000 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 (type: int) - sort order: + - Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 250 Data size: 1000 Basic stats: COMPLETE Column stats: COMPLETE - Execution mode: vectorized, llap - LLAP IO: no inputs - Map 5 - Map Operator Tree: - 
TableScan - alias: b - Statistics: Num rows: 242 Data size: 968 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator - predicate: key is not null (type: boolean) - Statistics: Num rows: 242 Data size: 968 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: key (type: int) - outputColumnNames: _col0 - Statistics: Num rows: 242 Data size: 968 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 (type: int) - sort order: + - Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 242 Data size: 968 Basic stats: COMPLETE Column stats: COMPLETE - Execution mode: vectorized, llap - LLAP IO: no inputs - Reducer 2 - Execution mode: vectorized, llap - Reduce Operator Tree: - Group By Operator - keys: KEY._col0 (type: int) - mode: mergepartial - outputColumnNames: _col0 - Statistics: Num rows: 250 Data size: 1000 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 (type: int) - sort order: + - Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 250 Data size: 1000 Basic stats: COMPLETE Column stats: COMPLETE - Reducer 3 - Execution mode: llap - Reduce Operator Tree: - Merge Join Operator - condition map: - Inner Join 0 to 1 - keys: - 0 _col0 (type: int) - 1 _col0 (type: int) - Statistics: Num rows: 204 Data size: 1632 Basic stats: COMPLETE Column stats: COMPLETE - Group By Operator - aggregations: count() - mode: hash - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - sort order: - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col0 (type: bigint) - Reducer 4 - Execution mode: vectorized, llap - Reduce Operator Tree: - Group By Operator - aggregations: count(VALUE._col0) - mode: mergepartial - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - ListSink - -PREHOOK: query: select count(*) -from -(select distinct key from tab_part) a join tab b on a.key = b.key -PREHOOK: type: QUERY -PREHOOK: Input: default@tab -PREHOOK: Input: default@tab@ds=2008-04-08 -PREHOOK: Input: default@tab_part -PREHOOK: Input: default@tab_part@ds=2008-04-08 -#### A masked pattern was here #### -POSTHOOK: query: select count(*) -from -(select distinct key from tab_part) a join tab b on a.key = b.key -POSTHOOK: type: QUERY -POSTHOOK: Input: default@tab -POSTHOOK: Input: default@tab@ds=2008-04-08 -POSTHOOK: Input: default@tab_part -POSTHOOK: Input: default@tab_part@ds=2008-04-08 -#### A masked pattern was here #### -242 -PREHOOK: query: explain -select count(*) -from -(select distinct key from tab_part) a join tab b on a.key = b.key -PREHOOK: type: QUERY -POSTHOOK: query: explain -select count(*) -from -(select distinct key from tab_part) a join tab b on a.key = b.key -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-1 - Tez -#### A masked pattern was here #### - Edges: - Reducer 2 <- Map 1 
(SIMPLE_EDGE), Map 4 (CUSTOM_SIMPLE_EDGE) - Reducer 3 <- Reducer 2 (CUSTOM_SIMPLE_EDGE) -#### A masked pattern was here #### - Vertices: - Map 1 - Map Operator Tree: - TableScan - alias: tab_part - Statistics: Num rows: 500 Data size: 2000 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator - predicate: key is not null (type: boolean) - Statistics: Num rows: 500 Data size: 2000 Basic stats: COMPLETE Column stats: COMPLETE - Group By Operator - keys: key (type: int) - mode: hash - outputColumnNames: _col0 - Statistics: Num rows: 250 Data size: 1000 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 (type: int) - sort order: + - Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 250 Data size: 1000 Basic stats: COMPLETE Column stats: COMPLETE - Execution mode: vectorized, llap - LLAP IO: no inputs - Map 4 - Map Operator Tree: - TableScan - alias: b - Statistics: Num rows: 242 Data size: 968 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator - predicate: key is not null (type: boolean) - Statistics: Num rows: 242 Data size: 968 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: key (type: int) - outputColumnNames: _col0 - Statistics: Num rows: 242 Data size: 968 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 (type: int) - sort order: + - Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 242 Data size: 968 Basic stats: COMPLETE Column stats: COMPLETE - Execution mode: vectorized, llap - LLAP IO: no inputs - Reducer 2 - Execution mode: vectorized, llap - Reduce Operator Tree: - Group By Operator - keys: KEY._col0 (type: int) - mode: mergepartial - outputColumnNames: _col0 - Statistics: Num rows: 250 Data size: 1000 Basic stats: COMPLETE Column stats: COMPLETE - Map Join Operator - condition map: - Inner Join 0 to 1 - keys: - 0 _col0 (type: int) - 1 _col0 (type: int) - input vertices: - 1 Map 4 - Statistics: Num rows: 204 Data size: 1632 Basic stats: COMPLETE Column stats: COMPLETE - Group By Operator - aggregations: count() - mode: hash - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - sort order: - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col0 (type: bigint) - Reducer 3 - Execution mode: vectorized, llap - Reduce Operator Tree: - Group By Operator - aggregations: count(VALUE._col0) - mode: mergepartial - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - ListSink - -PREHOOK: query: select count(*) -from -(select distinct key from tab_part) a join tab b on a.key = b.key -PREHOOK: type: QUERY -PREHOOK: Input: default@tab -PREHOOK: Input: default@tab@ds=2008-04-08 -PREHOOK: Input: default@tab_part -PREHOOK: Input: default@tab_part@ds=2008-04-08 -#### A masked pattern was here #### -POSTHOOK: query: select count(*) -from -(select distinct key from tab_part) a join tab b on a.key = b.key -POSTHOOK: type: QUERY 
-POSTHOOK: Input: default@tab -POSTHOOK: Input: default@tab@ds=2008-04-08 -POSTHOOK: Input: default@tab_part -POSTHOOK: Input: default@tab_part@ds=2008-04-08 -#### A masked pattern was here #### -242 -PREHOOK: query: explain -select count(*) -from -(select a.key as key, a.value as value from tab a join tab_part b on a.key = b.key) c -join -tab_part d on c.key = d.key -PREHOOK: type: QUERY -POSTHOOK: query: explain -select count(*) -from -(select a.key as key, a.value as value from tab a join tab_part b on a.key = b.key) c -join -tab_part d on c.key = d.key -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-1 - Tez -#### A masked pattern was here #### - Edges: - Reducer 2 <- Map 1 (SIMPLE_EDGE), Reducer 4 (ONE_TO_ONE_EDGE) - Reducer 3 <- Reducer 2 (CUSTOM_SIMPLE_EDGE) - Reducer 4 <- Map 1 (SIMPLE_EDGE), Map 5 (SIMPLE_EDGE) -#### A masked pattern was here #### - Vertices: - Map 1 - Map Operator Tree: - TableScan - alias: d - Statistics: Num rows: 500 Data size: 2000 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator - predicate: key is not null (type: boolean) - Statistics: Num rows: 500 Data size: 2000 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: key (type: int) - outputColumnNames: _col0 - Statistics: Num rows: 500 Data size: 2000 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 (type: int) - sort order: + - Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 500 Data size: 2000 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 (type: int) - sort order: + - Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 500 Data size: 2000 Basic stats: COMPLETE Column stats: COMPLETE - Execution mode: vectorized, llap - LLAP IO: no inputs - Map 5 - Map Operator Tree: - TableScan - alias: a - Statistics: Num rows: 242 Data size: 968 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator - predicate: key is not null (type: boolean) - Statistics: Num rows: 242 Data size: 968 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: key (type: int) - outputColumnNames: _col0 - Statistics: Num rows: 242 Data size: 968 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 (type: int) - sort order: + - Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 242 Data size: 968 Basic stats: COMPLETE Column stats: COMPLETE - Execution mode: vectorized, llap - LLAP IO: no inputs - Reducer 2 - Execution mode: llap - Reduce Operator Tree: - Merge Join Operator - condition map: - Inner Join 0 to 1 - keys: - 0 _col0 (type: int) - 1 _col0 (type: int) - Statistics: Num rows: 689 Data size: 5512 Basic stats: COMPLETE Column stats: COMPLETE - Group By Operator - aggregations: count() - mode: hash - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - sort order: - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col0 (type: bigint) - Reducer 3 - Execution mode: vectorized, llap - Reduce Operator Tree: - Group By Operator - aggregations: count(VALUE._col0) - mode: mergepartial - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: 
Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - Reducer 4 - Execution mode: llap - Reduce Operator Tree: - Merge Join Operator - condition map: - Inner Join 0 to 1 - keys: - 0 _col0 (type: int) - 1 _col0 (type: int) - outputColumnNames: _col1 - Statistics: Num rows: 408 Data size: 1632 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: _col1 (type: int) - outputColumnNames: _col0 - Statistics: Num rows: 408 Data size: 1632 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 (type: int) - sort order: + - Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 408 Data size: 1632 Basic stats: COMPLETE Column stats: COMPLETE - - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - ListSink - -PREHOOK: query: select count(*) -from -(select a.key as key, a.value as value from tab a join tab_part b on a.key = b.key) c -join -tab_part d on c.key = d.key -PREHOOK: type: QUERY -PREHOOK: Input: default@tab -PREHOOK: Input: default@tab@ds=2008-04-08 -PREHOOK: Input: default@tab_part -PREHOOK: Input: default@tab_part@ds=2008-04-08 -#### A masked pattern was here #### -POSTHOOK: query: select count(*) -from -(select a.key as key, a.value as value from tab a join tab_part b on a.key = b.key) c -join -tab_part d on c.key = d.key -POSTHOOK: type: QUERY -POSTHOOK: Input: default@tab -POSTHOOK: Input: default@tab@ds=2008-04-08 -POSTHOOK: Input: default@tab_part -POSTHOOK: Input: default@tab_part@ds=2008-04-08 -#### A masked pattern was here #### -1166 -PREHOOK: query: explain -select count(*) -from -(select a.key as key, a.value as value from tab a join tab_part b on a.key = b.key) c -join -tab_part d on c.key = d.key -PREHOOK: type: QUERY -POSTHOOK: query: explain -select count(*) -from -(select a.key as key, a.value as value from tab a join tab_part b on a.key = b.key) c -join -tab_part d on c.key = d.key -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-1 - Tez -#### A masked pattern was here #### - Edges: - Map 2 <- Map 1 (CUSTOM_EDGE), Map 4 (CUSTOM_EDGE) - Reducer 3 <- Map 2 (CUSTOM_SIMPLE_EDGE) -#### A masked pattern was here #### - Vertices: - Map 1 - Map Operator Tree: - TableScan - alias: d - Statistics: Num rows: 500 Data size: 2000 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator - predicate: key is not null (type: boolean) - Statistics: Num rows: 500 Data size: 2000 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: key (type: int) - outputColumnNames: _col0 - Statistics: Num rows: 500 Data size: 2000 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 (type: int) - sort order: + - Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 500 Data size: 2000 Basic stats: COMPLETE Column stats: COMPLETE - Execution mode: vectorized, llap - LLAP IO: no inputs - Map 2 - Map Operator Tree: - TableScan - alias: b - Statistics: Num rows: 500 Data size: 2000 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator - predicate: key is not null (type: boolean) - Statistics: Num rows: 500 Data size: 2000 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: key (type: 
int) - outputColumnNames: _col0 - Statistics: Num rows: 500 Data size: 2000 Basic stats: COMPLETE Column stats: COMPLETE - Map Join Operator - condition map: - Inner Join 0 to 1 - keys: - 0 _col0 (type: int) - 1 _col0 (type: int) - outputColumnNames: _col1 - input vertices: - 1 Map 4 - Statistics: Num rows: 408 Data size: 1632 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: _col1 (type: int) - outputColumnNames: _col0 - Statistics: Num rows: 408 Data size: 1632 Basic stats: COMPLETE Column stats: COMPLETE - Map Join Operator - condition map: - Inner Join 0 to 1 - keys: - 0 _col0 (type: int) - 1 _col0 (type: int) - input vertices: - 0 Map 1 - Statistics: Num rows: 689 Data size: 5512 Basic stats: COMPLETE Column stats: COMPLETE - Group By Operator - aggregations: count() - mode: hash - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - sort order: - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col0 (type: bigint) - Execution mode: vectorized, llap - LLAP IO: no inputs - Map 4 - Map Operator Tree: - TableScan - alias: a - Statistics: Num rows: 242 Data size: 968 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator - predicate: key is not null (type: boolean) - Statistics: Num rows: 242 Data size: 968 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: key (type: int) - outputColumnNames: _col0 - Statistics: Num rows: 242 Data size: 968 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 (type: int) - sort order: + - Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 242 Data size: 968 Basic stats: COMPLETE Column stats: COMPLETE - Execution mode: vectorized, llap - LLAP IO: no inputs - Reducer 3 - Execution mode: vectorized, llap - Reduce Operator Tree: - Group By Operator - aggregations: count(VALUE._col0) - mode: mergepartial - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - ListSink - -PREHOOK: query: select count(*) -from -(select a.key as key, a.value as value from tab a join tab_part b on a.key = b.key) c -join -tab_part d on c.key = d.key -PREHOOK: type: QUERY -PREHOOK: Input: default@tab -PREHOOK: Input: default@tab@ds=2008-04-08 -PREHOOK: Input: default@tab_part -PREHOOK: Input: default@tab_part@ds=2008-04-08 -#### A masked pattern was here #### -POSTHOOK: query: select count(*) -from -(select a.key as key, a.value as value from tab a join tab_part b on a.key = b.key) c -join -tab_part d on c.key = d.key -POSTHOOK: type: QUERY -POSTHOOK: Input: default@tab -POSTHOOK: Input: default@tab@ds=2008-04-08 -POSTHOOK: Input: default@tab_part -POSTHOOK: Input: default@tab_part@ds=2008-04-08 -#### A masked pattern was here #### -1166 -PREHOOK: query: explain -select count(*) -from -tab_part d -join -(select a.key as key, a.value as value from tab a join tab_part b on a.key = b.key) c on c.key = d.key -PREHOOK: type: QUERY -POSTHOOK: query: explain -select count(*) 
-from -tab_part d -join -(select a.key as key, a.value as value from tab a join tab_part b on a.key = b.key) c on c.key = d.key -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-1 - Tez -#### A masked pattern was here #### - Edges: - Reducer 2 <- Map 1 (SIMPLE_EDGE), Reducer 4 (ONE_TO_ONE_EDGE) - Reducer 3 <- Reducer 2 (CUSTOM_SIMPLE_EDGE) - Reducer 4 <- Map 1 (SIMPLE_EDGE), Map 5 (SIMPLE_EDGE) -#### A masked pattern was here #### - Vertices: - Map 1 - Map Operator Tree: - TableScan - alias: d - Statistics: Num rows: 500 Data size: 2000 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator - predicate: key is not null (type: boolean) - Statistics: Num rows: 500 Data size: 2000 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: key (type: int) - outputColumnNames: _col0 - Statistics: Num rows: 500 Data size: 2000 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 (type: int) - sort order: + - Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 500 Data size: 2000 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 (type: int) - sort order: + - Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 500 Data size: 2000 Basic stats: COMPLETE Column stats: COMPLETE - Execution mode: vectorized, llap - LLAP IO: no inputs - Map 5 - Map Operator Tree: - TableScan - alias: a - Statistics: Num rows: 242 Data size: 968 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator - predicate: key is not null (type: boolean) - Statistics: Num rows: 242 Data size: 968 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: key (type: int) - outputColumnNames: _col0 - Statistics: Num rows: 242 Data size: 968 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 (type: int) - sort order: + - Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 242 Data size: 968 Basic stats: COMPLETE Column stats: COMPLETE - Execution mode: vectorized, llap - LLAP IO: no inputs - Reducer 2 - Execution mode: llap - Reduce Operator Tree: - Merge Join Operator - condition map: - Inner Join 0 to 1 - keys: - 0 _col0 (type: int) - 1 _col0 (type: int) - Statistics: Num rows: 689 Data size: 5512 Basic stats: COMPLETE Column stats: COMPLETE - Group By Operator - aggregations: count() - mode: hash - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - sort order: - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col0 (type: bigint) - Reducer 3 - Execution mode: vectorized, llap - Reduce Operator Tree: - Group By Operator - aggregations: count(VALUE._col0) - mode: mergepartial - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - Reducer 4 - Execution mode: llap - Reduce Operator Tree: - Merge Join Operator - condition map: - Inner Join 0 to 1 - keys: - 0 _col0 (type: int) - 1 _col0 
(type: int) - outputColumnNames: _col1 - Statistics: Num rows: 408 Data size: 1632 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: _col1 (type: int) - outputColumnNames: _col0 - Statistics: Num rows: 408 Data size: 1632 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 (type: int) - sort order: + - Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 408 Data size: 1632 Basic stats: COMPLETE Column stats: COMPLETE - - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - ListSink - -PREHOOK: query: select count(*) -from -tab_part d -join -(select a.key as key, a.value as value from tab a join tab_part b on a.key = b.key) c on c.key = d.key -PREHOOK: type: QUERY -PREHOOK: Input: default@tab -PREHOOK: Input: default@tab@ds=2008-04-08 -PREHOOK: Input: default@tab_part -PREHOOK: Input: default@tab_part@ds=2008-04-08 -#### A masked pattern was here #### -POSTHOOK: query: select count(*) -from -tab_part d -join -(select a.key as key, a.value as value from tab a join tab_part b on a.key = b.key) c on c.key = d.key -POSTHOOK: type: QUERY -POSTHOOK: Input: default@tab -POSTHOOK: Input: default@tab@ds=2008-04-08 -POSTHOOK: Input: default@tab_part -POSTHOOK: Input: default@tab_part@ds=2008-04-08 -#### A masked pattern was here #### -1166 -PREHOOK: query: explain -select count(*) -from -tab_part d -join -(select a.key as key, a.value as value from tab a join tab_part b on a.key = b.key) c on c.key = d.key -PREHOOK: type: QUERY -POSTHOOK: query: explain -select count(*) -from -tab_part d -join -(select a.key as key, a.value as value from tab a join tab_part b on a.key = b.key) c on c.key = d.key -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-1 - Tez -#### A masked pattern was here #### - Edges: - Map 2 <- Map 1 (CUSTOM_EDGE), Map 4 (CUSTOM_EDGE) - Reducer 3 <- Map 2 (CUSTOM_SIMPLE_EDGE) -#### A masked pattern was here #### - Vertices: - Map 1 - Map Operator Tree: - TableScan - alias: d - Statistics: Num rows: 500 Data size: 2000 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator - predicate: key is not null (type: boolean) - Statistics: Num rows: 500 Data size: 2000 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: key (type: int) - outputColumnNames: _col0 - Statistics: Num rows: 500 Data size: 2000 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 (type: int) - sort order: + - Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 500 Data size: 2000 Basic stats: COMPLETE Column stats: COMPLETE - Execution mode: vectorized, llap - LLAP IO: no inputs - Map 2 - Map Operator Tree: - TableScan - alias: b - Statistics: Num rows: 500 Data size: 2000 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator - predicate: key is not null (type: boolean) - Statistics: Num rows: 500 Data size: 2000 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: key (type: int) - outputColumnNames: _col0 - Statistics: Num rows: 500 Data size: 2000 Basic stats: COMPLETE Column stats: COMPLETE - Map Join Operator - condition map: - Inner Join 0 to 1 - keys: - 0 _col0 (type: int) - 1 _col0 (type: int) - outputColumnNames: _col1 - input vertices: - 1 Map 4 - Statistics: Num rows: 408 Data size: 1632 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: _col1 (type: int) - 
outputColumnNames: _col0 - Statistics: Num rows: 408 Data size: 1632 Basic stats: COMPLETE Column stats: COMPLETE - Map Join Operator - condition map: - Inner Join 0 to 1 - keys: - 0 _col0 (type: int) - 1 _col0 (type: int) - input vertices: - 0 Map 1 - Statistics: Num rows: 689 Data size: 5512 Basic stats: COMPLETE Column stats: COMPLETE - Group By Operator - aggregations: count() - mode: hash - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - sort order: - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col0 (type: bigint) - Execution mode: vectorized, llap - LLAP IO: no inputs - Map 4 - Map Operator Tree: - TableScan - alias: a - Statistics: Num rows: 242 Data size: 968 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator - predicate: key is not null (type: boolean) - Statistics: Num rows: 242 Data size: 968 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: key (type: int) - outputColumnNames: _col0 - Statistics: Num rows: 242 Data size: 968 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 (type: int) - sort order: + - Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 242 Data size: 968 Basic stats: COMPLETE Column stats: COMPLETE - Execution mode: vectorized, llap - LLAP IO: no inputs - Reducer 3 - Execution mode: vectorized, llap - Reduce Operator Tree: - Group By Operator - aggregations: count(VALUE._col0) - mode: mergepartial - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - ListSink - -PREHOOK: query: select count(*) -from -tab_part d -join -(select a.key as key, a.value as value from tab a join tab_part b on a.key = b.key) c on c.key = d.key -PREHOOK: type: QUERY -PREHOOK: Input: default@tab -PREHOOK: Input: default@tab@ds=2008-04-08 -PREHOOK: Input: default@tab_part -PREHOOK: Input: default@tab_part@ds=2008-04-08 -#### A masked pattern was here #### -POSTHOOK: query: select count(*) -from -tab_part d -join -(select a.key as key, a.value as value from tab a join tab_part b on a.key = b.key) c on c.key = d.key -POSTHOOK: type: QUERY -POSTHOOK: Input: default@tab -POSTHOOK: Input: default@tab@ds=2008-04-08 -POSTHOOK: Input: default@tab_part -POSTHOOK: Input: default@tab_part@ds=2008-04-08 -#### A masked pattern was here #### -1166 -PREHOOK: query: explain -select a.k1, a.v1, b.value -from (select sum(substr(srcbucket_mapjoin.value,5)) as v1, key as k1 from srcbucket_mapjoin GROUP BY srcbucket_mapjoin.key) a -join tab b on a.k1 = b.key -PREHOOK: type: QUERY -POSTHOOK: query: explain -select a.k1, a.v1, b.value -from (select sum(substr(srcbucket_mapjoin.value,5)) as v1, key as k1 from srcbucket_mapjoin GROUP BY srcbucket_mapjoin.key) a -join tab b on a.k1 = b.key -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-1 - Tez -#### A masked pattern was here #### - Edges: - Reducer 2 <- Map 1 (SIMPLE_EDGE) 
- Reducer 3 <- Map 4 (SIMPLE_EDGE), Reducer 2 (ONE_TO_ONE_EDGE) -#### A masked pattern was here #### - Vertices: - Map 1 - Map Operator Tree: - TableScan - alias: srcbucket_mapjoin - Statistics: Num rows: 242 Data size: 22990 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator - predicate: key is not null (type: boolean) - Statistics: Num rows: 242 Data size: 22990 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: key (type: int), substr(value, 5) (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 242 Data size: 22990 Basic stats: COMPLETE Column stats: COMPLETE - Group By Operator - aggregations: sum(_col1) - keys: _col0 (type: int) - mode: hash - outputColumnNames: _col0, _col1 - Statistics: Num rows: 121 Data size: 1452 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 (type: int) - sort order: + - Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 121 Data size: 1452 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col1 (type: double) - Execution mode: llap - LLAP IO: no inputs - Map 4 - Map Operator Tree: - TableScan - alias: b - Statistics: Num rows: 242 Data size: 22990 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator - predicate: key is not null (type: boolean) - Statistics: Num rows: 242 Data size: 22990 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: key (type: int), value (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 242 Data size: 22990 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 (type: int) - sort order: + - Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 242 Data size: 22990 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col1 (type: string) - Execution mode: vectorized, llap - LLAP IO: no inputs - Reducer 2 - Execution mode: vectorized, llap - Reduce Operator Tree: - Group By Operator - aggregations: sum(VALUE._col0) - keys: KEY._col0 (type: int) - mode: mergepartial - outputColumnNames: _col0, _col1 - Statistics: Num rows: 121 Data size: 1452 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: _col1 (type: double), _col0 (type: int) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 121 Data size: 1452 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col1 (type: int) - sort order: + - Map-reduce partition columns: _col1 (type: int) - Statistics: Num rows: 121 Data size: 1452 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col0 (type: double) - Reducer 3 - Execution mode: llap - Reduce Operator Tree: - Merge Join Operator - condition map: - Inner Join 0 to 1 - keys: - 0 _col1 (type: int) - 1 _col0 (type: int) - outputColumnNames: _col0, _col1, _col3 - Statistics: Num rows: 191 Data size: 19673 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: _col1 (type: int), _col0 (type: double), _col3 (type: string) - outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 191 Data size: 19673 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: Num rows: 191 Data size: 19673 Basic stats: COMPLETE Column stats: COMPLETE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - ListSink - -PREHOOK: query: explain -select a.k1, a.v1, b.value -from (select sum(substr(srcbucket_mapjoin.value,5)) as v1, key as k1 from srcbucket_mapjoin GROUP BY srcbucket_mapjoin.key) a - join tab b on a.k1 = b.key -PREHOOK: type: QUERY -POSTHOOK: query: explain -select a.k1, a.v1, b.value -from (select sum(substr(srcbucket_mapjoin.value,5)) as v1, key as k1 from srcbucket_mapjoin GROUP BY srcbucket_mapjoin.key) a - join tab b on a.k1 = b.key -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-1 - Tez -#### A masked pattern was here #### - Edges: - Map 3 <- Reducer 2 (CUSTOM_EDGE) - Reducer 2 <- Map 1 (SIMPLE_EDGE) -#### A masked pattern was here #### - Vertices: - Map 1 - Map Operator Tree: - TableScan - alias: srcbucket_mapjoin - Statistics: Num rows: 242 Data size: 22990 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator - predicate: key is not null (type: boolean) - Statistics: Num rows: 242 Data size: 22990 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: key (type: int), substr(value, 5) (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 242 Data size: 22990 Basic stats: COMPLETE Column stats: COMPLETE - Group By Operator - aggregations: sum(_col1) - keys: _col0 (type: int) - mode: hash - outputColumnNames: _col0, _col1 - Statistics: Num rows: 121 Data size: 1452 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 (type: int) - sort order: + - Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 121 Data size: 1452 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col1 (type: double) - Execution mode: llap - LLAP IO: no inputs - Map 3 - Map Operator Tree: - TableScan - alias: b - Statistics: Num rows: 242 Data size: 22990 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator - predicate: key is not null (type: boolean) - Statistics: Num rows: 242 Data size: 22990 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: key (type: int), value (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 242 Data size: 22990 Basic stats: COMPLETE Column stats: COMPLETE - Map Join Operator - condition map: - Inner Join 0 to 1 - keys: - 0 _col1 (type: int) - 1 _col0 (type: int) - outputColumnNames: _col0, _col1, _col3 - input vertices: - 0 Reducer 2 - Statistics: Num rows: 191 Data size: 19673 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: _col1 (type: int), _col0 (type: double), _col3 (type: string) - outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 191 Data size: 19673 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: Num rows: 191 Data size: 19673 Basic stats: COMPLETE Column stats: COMPLETE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - Execution mode: vectorized, llap - LLAP IO: no inputs - Reducer 2 - Execution mode: vectorized, llap - Reduce Operator Tree: - Group By Operator - aggregations: sum(VALUE._col0) - keys: KEY._col0 (type: int) - mode: mergepartial - outputColumnNames: _col0, _col1 - Statistics: Num rows: 121 Data size: 1452 
Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: _col1 (type: double), _col0 (type: int) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 121 Data size: 1452 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col1 (type: int) - sort order: + - Map-reduce partition columns: _col1 (type: int) - Statistics: Num rows: 121 Data size: 1452 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col0 (type: double) - - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - ListSink - -PREHOOK: query: explain -select a.k1, a.v1, b.value -from (select sum(substr(tab.value,5)) as v1, key as k1 from tab_part join tab on tab_part.key = tab.key GROUP BY tab.key) a -join tab b on a.k1 = b.key -PREHOOK: type: QUERY -POSTHOOK: query: explain -select a.k1, a.v1, b.value -from (select sum(substr(tab.value,5)) as v1, key as k1 from tab_part join tab on tab_part.key = tab.key GROUP BY tab.key) a -join tab b on a.k1 = b.key -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-1 - Tez -#### A masked pattern was here #### - Edges: - Reducer 2 <- Map 1 (SIMPLE_EDGE), Reducer 4 (ONE_TO_ONE_EDGE) - Reducer 3 <- Map 1 (SIMPLE_EDGE), Map 5 (SIMPLE_EDGE) - Reducer 4 <- Reducer 3 (SIMPLE_EDGE) -#### A masked pattern was here #### - Vertices: - Map 1 - Map Operator Tree: - TableScan - alias: b - Statistics: Num rows: 242 Data size: 22990 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator - predicate: key is not null (type: boolean) - Statistics: Num rows: 242 Data size: 22990 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: key (type: int), value (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 242 Data size: 22990 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 (type: int) - sort order: + - Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 242 Data size: 22990 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col1 (type: string) - Reduce Output Operator - key expressions: _col0 (type: int) - sort order: + - Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 242 Data size: 22990 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col1 (type: string) - Execution mode: vectorized, llap - LLAP IO: no inputs - Map 5 - Map Operator Tree: - TableScan - alias: tab_part - Statistics: Num rows: 500 Data size: 2000 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator - predicate: key is not null (type: boolean) - Statistics: Num rows: 500 Data size: 2000 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: key (type: int) - outputColumnNames: _col0 - Statistics: Num rows: 500 Data size: 2000 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 (type: int) - sort order: + - Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 500 Data size: 2000 Basic stats: COMPLETE Column stats: COMPLETE - Execution mode: vectorized, llap - LLAP IO: no inputs - Reducer 2 - Execution mode: llap - Reduce Operator Tree: - Merge Join Operator - condition map: - Inner Join 0 to 1 - keys: - 0 _col0 (type: int) - 1 _col1 (type: int) - outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 242 Data size: 24926 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: 
_col0 (type: int), _col2 (type: double), _col1 (type: string) - outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 242 Data size: 24926 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: Num rows: 242 Data size: 24926 Basic stats: COMPLETE Column stats: COMPLETE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - Reducer 3 - Execution mode: llap - Reduce Operator Tree: - Merge Join Operator - condition map: - Inner Join 0 to 1 - keys: - 0 _col0 (type: int) - 1 _col0 (type: int) - outputColumnNames: _col1, _col2 - Statistics: Num rows: 408 Data size: 38760 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: _col1 (type: int), substr(_col2, 5) (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 408 Data size: 38760 Basic stats: COMPLETE Column stats: COMPLETE - Group By Operator - aggregations: sum(_col1) - keys: _col0 (type: int) - mode: hash - outputColumnNames: _col0, _col1 - Statistics: Num rows: 153 Data size: 1836 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 (type: int) - sort order: + - Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 153 Data size: 1836 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col1 (type: double) - Reducer 4 - Execution mode: vectorized, llap - Reduce Operator Tree: - Group By Operator - aggregations: sum(VALUE._col0) - keys: KEY._col0 (type: int) - mode: mergepartial - outputColumnNames: _col0, _col1 - Statistics: Num rows: 153 Data size: 1836 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: _col1 (type: double), _col0 (type: int) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 153 Data size: 1836 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col1 (type: int) - sort order: + - Map-reduce partition columns: _col1 (type: int) - Statistics: Num rows: 153 Data size: 1836 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col0 (type: double) - - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - ListSink - -PREHOOK: query: explain -select a.k1, a.v1, b.value -from (select sum(substr(tab.value,5)) as v1, key as k1 from tab_part join tab on tab_part.key = tab.key GROUP BY tab.key) a - join tab b on a.k1 = b.key -PREHOOK: type: QUERY -POSTHOOK: query: explain -select a.k1, a.v1, b.value -from (select sum(substr(tab.value,5)) as v1, key as k1 from tab_part join tab on tab_part.key = tab.key GROUP BY tab.key) a - join tab b on a.k1 = b.key -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-1 - Tez -#### A masked pattern was here #### - Edges: - Map 1 <- Reducer 4 (CUSTOM_EDGE) - Map 3 <- Map 2 (CUSTOM_EDGE) - Reducer 4 <- Map 3 (SIMPLE_EDGE) -#### A masked pattern was here #### - Vertices: - Map 1 - Map Operator Tree: - TableScan - alias: b - Statistics: Num rows: 242 Data size: 22990 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator - predicate: key is not null (type: boolean) - Statistics: Num rows: 242 Data size: 22990 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: key (type: int), value (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 242 Data size: 
22990 Basic stats: COMPLETE Column stats: COMPLETE - Map Join Operator - condition map: - Inner Join 0 to 1 - keys: - 0 _col0 (type: int) - 1 _col1 (type: int) - outputColumnNames: _col0, _col1, _col2 - input vertices: - 1 Reducer 4 - Statistics: Num rows: 242 Data size: 24926 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: _col0 (type: int), _col2 (type: double), _col1 (type: string) - outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 242 Data size: 24926 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: Num rows: 242 Data size: 24926 Basic stats: COMPLETE Column stats: COMPLETE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - Execution mode: vectorized, llap - LLAP IO: no inputs - Map 2 - Map Operator Tree: - TableScan - alias: tab_part - Statistics: Num rows: 500 Data size: 2000 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator - predicate: key is not null (type: boolean) - Statistics: Num rows: 500 Data size: 2000 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: key (type: int) - outputColumnNames: _col0 - Statistics: Num rows: 500 Data size: 2000 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 (type: int) - sort order: + - Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 500 Data size: 2000 Basic stats: COMPLETE Column stats: COMPLETE - Execution mode: vectorized, llap - LLAP IO: no inputs - Map 3 - Map Operator Tree: - TableScan - alias: tab - Statistics: Num rows: 242 Data size: 22990 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator - predicate: key is not null (type: boolean) - Statistics: Num rows: 242 Data size: 22990 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: key (type: int), value (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 242 Data size: 22990 Basic stats: COMPLETE Column stats: COMPLETE - Map Join Operator - condition map: - Inner Join 0 to 1 - keys: - 0 _col0 (type: int) - 1 _col0 (type: int) - outputColumnNames: _col1, _col2 - input vertices: - 0 Map 2 - Statistics: Num rows: 408 Data size: 38760 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: _col1 (type: int), substr(_col2, 5) (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 408 Data size: 38760 Basic stats: COMPLETE Column stats: COMPLETE - Group By Operator - aggregations: sum(_col1) - keys: _col0 (type: int) - mode: hash - outputColumnNames: _col0, _col1 - Statistics: Num rows: 153 Data size: 1836 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 (type: int) - sort order: + - Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 153 Data size: 1836 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col1 (type: double) - Execution mode: llap - LLAP IO: no inputs - Reducer 4 - Execution mode: vectorized, llap - Reduce Operator Tree: - Group By Operator - aggregations: sum(VALUE._col0) - keys: KEY._col0 (type: int) - mode: mergepartial - outputColumnNames: _col0, _col1 - Statistics: Num rows: 153 Data size: 1836 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: _col1 (type: double), _col0 (type: int) - outputColumnNames: 
_col0, _col1 - Statistics: Num rows: 153 Data size: 1836 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col1 (type: int) - sort order: + - Map-reduce partition columns: _col1 (type: int) - Statistics: Num rows: 153 Data size: 1836 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col0 (type: double) - - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - ListSink - -PREHOOK: query: explain -select a.k1, a.v1, b.value -from (select sum(substr(x.value,5)) as v1, x.key as k1 from tab x join tab y on x.key = y.key GROUP BY x.key) a -join tab_part b on a.k1 = b.key -PREHOOK: type: QUERY -POSTHOOK: query: explain -select a.k1, a.v1, b.value -from (select sum(substr(x.value,5)) as v1, x.key as k1 from tab x join tab y on x.key = y.key GROUP BY x.key) a -join tab_part b on a.k1 = b.key -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-1 - Tez -#### A masked pattern was here #### - Edges: - Map 3 <- Map 5 (BROADCAST_EDGE) - Reducer 2 <- Map 1 (SIMPLE_EDGE), Reducer 4 (ONE_TO_ONE_EDGE) - Reducer 4 <- Map 3 (SIMPLE_EDGE) -#### A masked pattern was here #### - Vertices: - Map 1 - Map Operator Tree: - TableScan - alias: b - Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator - predicate: key is not null (type: boolean) - Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: key (type: int), value (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 (type: int) - sort order: + - Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col1 (type: string) - Execution mode: vectorized, llap - LLAP IO: no inputs - Map 3 - Map Operator Tree: - TableScan - alias: x - Statistics: Num rows: 242 Data size: 22990 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator - predicate: key is not null (type: boolean) - Statistics: Num rows: 242 Data size: 22990 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: key (type: int), value (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 242 Data size: 22990 Basic stats: COMPLETE Column stats: COMPLETE - Map Join Operator - condition map: - Inner Join 0 to 1 - keys: - 0 _col0 (type: int) - 1 _col0 (type: int) - outputColumnNames: _col0, _col1 - input vertices: - 1 Map 5 - Statistics: Num rows: 382 Data size: 36290 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: _col0 (type: int), substr(_col1, 5) (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 382 Data size: 36290 Basic stats: COMPLETE Column stats: COMPLETE - Group By Operator - aggregations: sum(_col1) - keys: _col0 (type: int) - mode: hash - outputColumnNames: _col0, _col1 - Statistics: Num rows: 153 Data size: 1836 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 (type: int) - sort order: + - Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 153 Data size: 1836 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col1 (type: double) - Execution mode: llap - LLAP IO: no inputs - Map 5 - Map Operator Tree: - TableScan - 
alias: y - Statistics: Num rows: 242 Data size: 968 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator - predicate: key is not null (type: boolean) - Statistics: Num rows: 242 Data size: 968 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: key (type: int) - outputColumnNames: _col0 - Statistics: Num rows: 242 Data size: 968 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 (type: int) - sort order: + - Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 242 Data size: 968 Basic stats: COMPLETE Column stats: COMPLETE - Execution mode: vectorized, llap - LLAP IO: no inputs - Reducer 2 - Execution mode: llap - Reduce Operator Tree: - Merge Join Operator - condition map: - Inner Join 0 to 1 - keys: - 0 _col0 (type: int) - 1 _col1 (type: int) - outputColumnNames: _col1, _col2, _col3 - Statistics: Num rows: 258 Data size: 26574 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: _col3 (type: int), _col2 (type: double), _col1 (type: string) - outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 258 Data size: 26574 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: Num rows: 258 Data size: 26574 Basic stats: COMPLETE Column stats: COMPLETE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - Reducer 4 - Execution mode: vectorized, llap - Reduce Operator Tree: - Group By Operator - aggregations: sum(VALUE._col0) - keys: KEY._col0 (type: int) - mode: mergepartial - outputColumnNames: _col0, _col1 - Statistics: Num rows: 153 Data size: 1836 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: _col1 (type: double), _col0 (type: int) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 153 Data size: 1836 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col1 (type: int) - sort order: + - Map-reduce partition columns: _col1 (type: int) - Statistics: Num rows: 153 Data size: 1836 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col0 (type: double) - - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - ListSink - -PREHOOK: query: explain -select a.k1, a.v1, b.value -from (select sum(substr(x.value,5)) as v1, x.key as k1 from tab x join tab y on x.key = y.key GROUP BY x.key) a - join tab_part b on a.k1 = b.key -PREHOOK: type: QUERY -POSTHOOK: query: explain -select a.k1, a.v1, b.value -from (select sum(substr(x.value,5)) as v1, x.key as k1 from tab x join tab y on x.key = y.key GROUP BY x.key) a - join tab_part b on a.k1 = b.key -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-1 - Tez -#### A masked pattern was here #### - Edges: - Map 1 <- Reducer 3 (CUSTOM_EDGE) - Map 2 <- Map 4 (CUSTOM_EDGE) - Reducer 3 <- Map 2 (SIMPLE_EDGE) -#### A masked pattern was here #### - Vertices: - Map 1 - Map Operator Tree: - TableScan - alias: b - Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator - predicate: key is not null (type: boolean) - Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: key (type: int), value (type: string) - outputColumnNames: _col0, _col1 - 
Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE - Map Join Operator - condition map: - Inner Join 0 to 1 - keys: - 0 _col0 (type: int) - 1 _col1 (type: int) - outputColumnNames: _col1, _col2, _col3 - input vertices: - 1 Reducer 3 - Statistics: Num rows: 258 Data size: 26574 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: _col3 (type: int), _col2 (type: double), _col1 (type: string) - outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 258 Data size: 26574 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: Num rows: 258 Data size: 26574 Basic stats: COMPLETE Column stats: COMPLETE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - Execution mode: vectorized, llap - LLAP IO: no inputs - Map 2 - Map Operator Tree: - TableScan - alias: x - Statistics: Num rows: 242 Data size: 22990 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator - predicate: key is not null (type: boolean) - Statistics: Num rows: 242 Data size: 22990 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: key (type: int), value (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 242 Data size: 22990 Basic stats: COMPLETE Column stats: COMPLETE - Map Join Operator - condition map: - Inner Join 0 to 1 - keys: - 0 _col0 (type: int) - 1 _col0 (type: int) - outputColumnNames: _col0, _col1 - input vertices: - 1 Map 4 - Statistics: Num rows: 382 Data size: 36290 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: _col0 (type: int), substr(_col1, 5) (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 382 Data size: 36290 Basic stats: COMPLETE Column stats: COMPLETE - Group By Operator - aggregations: sum(_col1) - keys: _col0 (type: int) - mode: hash - outputColumnNames: _col0, _col1 - Statistics: Num rows: 153 Data size: 1836 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 (type: int) - sort order: + - Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 153 Data size: 1836 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col1 (type: double) - Execution mode: llap - LLAP IO: no inputs - Map 4 - Map Operator Tree: - TableScan - alias: y - Statistics: Num rows: 242 Data size: 968 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator - predicate: key is not null (type: boolean) - Statistics: Num rows: 242 Data size: 968 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: key (type: int) - outputColumnNames: _col0 - Statistics: Num rows: 242 Data size: 968 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 (type: int) - sort order: + - Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 242 Data size: 968 Basic stats: COMPLETE Column stats: COMPLETE - Execution mode: vectorized, llap - LLAP IO: no inputs - Reducer 3 - Execution mode: vectorized, llap - Reduce Operator Tree: - Group By Operator - aggregations: sum(VALUE._col0) - keys: KEY._col0 (type: int) - mode: mergepartial - outputColumnNames: _col0, _col1 - Statistics: Num rows: 153 Data size: 1836 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: _col1 (type: double), _col0 (type: int) - 
outputColumnNames: _col0, _col1 - Statistics: Num rows: 153 Data size: 1836 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col1 (type: int) - sort order: + - Map-reduce partition columns: _col1 (type: int) - Statistics: Num rows: 153 Data size: 1836 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col0 (type: double) - - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - ListSink - -PREHOOK: query: explain -select a.key, a.value, b.value -from tab_part a join tab b on a.key = b.key join tab c on a.key = c.key -PREHOOK: type: QUERY -POSTHOOK: query: explain -select a.key, a.value, b.value -from tab_part a join tab b on a.key = b.key join tab c on a.key = c.key -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-1 - Tez -#### A masked pattern was here #### - Edges: - Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 3 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE) -#### A masked pattern was here #### - Vertices: - Map 1 - Map Operator Tree: - TableScan - alias: a - Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator - predicate: key is not null (type: boolean) - Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: key (type: int), value (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 (type: int) - sort order: + - Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col1 (type: string) - Execution mode: vectorized, llap - LLAP IO: no inputs - Map 3 - Map Operator Tree: - TableScan - alias: c - Statistics: Num rows: 242 Data size: 968 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator - predicate: key is not null (type: boolean) - Statistics: Num rows: 242 Data size: 968 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: key (type: int) - outputColumnNames: _col0 - Statistics: Num rows: 242 Data size: 968 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 (type: int) - sort order: + - Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 242 Data size: 968 Basic stats: COMPLETE Column stats: COMPLETE - Execution mode: vectorized, llap - LLAP IO: no inputs - Map 4 - Map Operator Tree: - TableScan - alias: b - Statistics: Num rows: 242 Data size: 22990 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator - predicate: key is not null (type: boolean) - Statistics: Num rows: 242 Data size: 22990 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: key (type: int), value (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 242 Data size: 22990 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 (type: int) - sort order: + - Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 242 Data size: 22990 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col1 (type: string) - Execution mode: vectorized, llap - LLAP IO: no inputs - Reducer 2 - Execution mode: llap - Reduce Operator Tree: - Merge Join Operator - condition map: - Inner Join 0 to 1 - Inner Join 0 to 2 - keys: - 0 _col0 
(type: int) - 1 _col0 (type: int) - 2 _col0 (type: int) - outputColumnNames: _col0, _col1, _col4 - Statistics: Num rows: 646 Data size: 120156 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col4 (type: string) - outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 646 Data size: 120156 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: Num rows: 646 Data size: 120156 Basic stats: COMPLETE Column stats: COMPLETE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - ListSink - -PREHOOK: query: explain -select a.key, a.value, b.value -from tab_part a join tab b on a.key = b.key join tab c on a.key = c.key -PREHOOK: type: QUERY -POSTHOOK: query: explain -select a.key, a.value, b.value -from tab_part a join tab b on a.key = b.key join tab c on a.key = c.key -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-1 - Tez -#### A masked pattern was here #### - Edges: - Map 1 <- Map 2 (CUSTOM_EDGE), Map 3 (CUSTOM_EDGE) -#### A masked pattern was here #### - Vertices: - Map 1 - Map Operator Tree: - TableScan - alias: a - Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator - predicate: key is not null (type: boolean) - Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: key (type: int), value (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE - Map Join Operator - condition map: - Inner Join 0 to 1 - Inner Join 0 to 2 - keys: - 0 _col0 (type: int) - 1 _col0 (type: int) - 2 _col0 (type: int) - outputColumnNames: _col0, _col1, _col4 - input vertices: - 1 Map 2 - 2 Map 3 - Statistics: Num rows: 646 Data size: 120156 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col4 (type: string) - outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 646 Data size: 120156 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: Num rows: 646 Data size: 120156 Basic stats: COMPLETE Column stats: COMPLETE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - Execution mode: vectorized, llap - LLAP IO: no inputs - Map 2 - Map Operator Tree: - TableScan - alias: c - Statistics: Num rows: 242 Data size: 968 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator - predicate: key is not null (type: boolean) - Statistics: Num rows: 242 Data size: 968 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: key (type: int) - outputColumnNames: _col0 - Statistics: Num rows: 242 Data size: 968 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 (type: int) - sort order: + - Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 242 Data size: 968 Basic stats: COMPLETE Column stats: COMPLETE - Execution mode: vectorized, 
llap - LLAP IO: no inputs - Map 3 - Map Operator Tree: - TableScan - alias: b - Statistics: Num rows: 242 Data size: 22990 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator - predicate: key is not null (type: boolean) - Statistics: Num rows: 242 Data size: 22990 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: key (type: int), value (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 242 Data size: 22990 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 (type: int) - sort order: + - Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 242 Data size: 22990 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col1 (type: string) - Execution mode: vectorized, llap - LLAP IO: no inputs - - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - ListSink - -PREHOOK: query: explain -select a.key, a.value, c.value -from (select x.key, x.value from tab_part x join tab y on x.key = y.key) a join tab c on a.key = c.key -PREHOOK: type: QUERY -POSTHOOK: query: explain -select a.key, a.value, c.value -from (select x.key, x.value from tab_part x join tab y on x.key = y.key) a join tab c on a.key = c.key -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-1 - Tez -#### A masked pattern was here #### - Edges: - Map 1 <- Map 3 (BROADCAST_EDGE) - Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE) -#### A masked pattern was here #### - Vertices: - Map 1 - Map Operator Tree: - TableScan - alias: x - Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator - predicate: key is not null (type: boolean) - Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: key (type: int), value (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE - Map Join Operator - condition map: - Inner Join 0 to 1 - keys: - 0 _col0 (type: int) - 1 _col0 (type: int) - outputColumnNames: _col0, _col1 - input vertices: - 1 Map 3 - Statistics: Num rows: 408 Data size: 38760 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 (type: int) - sort order: + - Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 408 Data size: 38760 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col1 (type: string) - Execution mode: vectorized, llap - LLAP IO: no inputs - Map 3 - Map Operator Tree: - TableScan - alias: y - Statistics: Num rows: 242 Data size: 968 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator - predicate: key is not null (type: boolean) - Statistics: Num rows: 242 Data size: 968 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: key (type: int) - outputColumnNames: _col0 - Statistics: Num rows: 242 Data size: 968 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 (type: int) - sort order: + - Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 242 Data size: 968 Basic stats: COMPLETE Column stats: COMPLETE - Execution mode: vectorized, llap - LLAP IO: no inputs - Map 4 - Map Operator Tree: - TableScan - alias: c - Statistics: Num rows: 242 Data size: 22990 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator - predicate: 
key is not null (type: boolean) - Statistics: Num rows: 242 Data size: 22990 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: key (type: int), value (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 242 Data size: 22990 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 (type: int) - sort order: + - Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 242 Data size: 22990 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col1 (type: string) - Execution mode: vectorized, llap - LLAP IO: no inputs - Reducer 2 - Execution mode: llap - Reduce Operator Tree: - Merge Join Operator - condition map: - Inner Join 0 to 1 - keys: - 0 _col0 (type: int) - 1 _col0 (type: int) - outputColumnNames: _col0, _col1, _col3 - Statistics: Num rows: 645 Data size: 119970 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col3 (type: string) - outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 645 Data size: 119970 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: Num rows: 645 Data size: 119970 Basic stats: COMPLETE Column stats: COMPLETE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - ListSink - -PREHOOK: query: explain -select a.key, a.value, c.value -from (select x.key, x.value from tab_part x join tab y on x.key = y.key) a join tab c on a.key = c.key -PREHOOK: type: QUERY -POSTHOOK: query: explain -select a.key, a.value, c.value -from (select x.key, x.value from tab_part x join tab y on x.key = y.key) a join tab c on a.key = c.key -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-1 - Tez -#### A masked pattern was here #### - Edges: - Map 1 <- Map 2 (CUSTOM_EDGE), Map 3 (CUSTOM_EDGE) -#### A masked pattern was here #### - Vertices: - Map 1 - Map Operator Tree: - TableScan - alias: x - Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator - predicate: key is not null (type: boolean) - Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: key (type: int), value (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE - Map Join Operator - condition map: - Inner Join 0 to 1 - keys: - 0 _col0 (type: int) - 1 _col0 (type: int) - outputColumnNames: _col0, _col1 - input vertices: - 1 Map 2 - Statistics: Num rows: 408 Data size: 38760 Basic stats: COMPLETE Column stats: COMPLETE - Map Join Operator - condition map: - Inner Join 0 to 1 - keys: - 0 _col0 (type: int) - 1 _col0 (type: int) - outputColumnNames: _col0, _col1, _col3 - input vertices: - 1 Map 3 - Statistics: Num rows: 645 Data size: 119970 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col3 (type: string) - outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 645 Data size: 119970 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: Num rows: 645 Data size: 
119970 Basic stats: COMPLETE Column stats: COMPLETE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - Execution mode: vectorized, llap - LLAP IO: no inputs - Map 2 - Map Operator Tree: - TableScan - alias: y - Statistics: Num rows: 242 Data size: 968 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator - predicate: key is not null (type: boolean) - Statistics: Num rows: 242 Data size: 968 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: key (type: int) - outputColumnNames: _col0 - Statistics: Num rows: 242 Data size: 968 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 (type: int) - sort order: + - Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 242 Data size: 968 Basic stats: COMPLETE Column stats: COMPLETE - Execution mode: vectorized, llap - LLAP IO: no inputs - Map 3 - Map Operator Tree: - TableScan - alias: c - Statistics: Num rows: 242 Data size: 22990 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator - predicate: key is not null (type: boolean) - Statistics: Num rows: 242 Data size: 22990 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: key (type: int), value (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 242 Data size: 22990 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 (type: int) - sort order: + - Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 242 Data size: 22990 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col1 (type: string) - Execution mode: vectorized, llap - LLAP IO: no inputs - - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - ListSink - -PREHOOK: query: explain -select a.key, a.value, b.value -from (select key, sum(substr(srcbucket_mapjoin.value,5)) as value from srcbucket_mapjoin GROUP BY srcbucket_mapjoin.key) a -join tab_part b on a.key = b.key -PREHOOK: type: QUERY -POSTHOOK: query: explain -select a.key, a.value, b.value -from (select key, sum(substr(srcbucket_mapjoin.value,5)) as value from srcbucket_mapjoin GROUP BY srcbucket_mapjoin.key) a -join tab_part b on a.key = b.key -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-1 - Tez -#### A masked pattern was here #### - Edges: - Reducer 2 <- Map 1 (SIMPLE_EDGE) - Reducer 3 <- Map 4 (SIMPLE_EDGE), Reducer 2 (ONE_TO_ONE_EDGE) -#### A masked pattern was here #### - Vertices: - Map 1 - Map Operator Tree: - TableScan - alias: srcbucket_mapjoin - Statistics: Num rows: 242 Data size: 22990 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator - predicate: key is not null (type: boolean) - Statistics: Num rows: 242 Data size: 22990 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: key (type: int), substr(value, 5) (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 242 Data size: 22990 Basic stats: COMPLETE Column stats: COMPLETE - Group By Operator - aggregations: sum(_col1) - keys: _col0 (type: int) - mode: hash - outputColumnNames: _col0, _col1 - Statistics: Num rows: 121 Data size: 1452 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 (type: int) - sort order: + - Map-reduce partition 
columns: _col0 (type: int) - Statistics: Num rows: 121 Data size: 1452 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col1 (type: double) - Execution mode: llap - LLAP IO: no inputs - Map 4 - Map Operator Tree: - TableScan - alias: b - Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator - predicate: key is not null (type: boolean) - Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: key (type: int), value (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 (type: int) - sort order: + - Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col1 (type: string) - Execution mode: vectorized, llap - LLAP IO: no inputs - Reducer 2 - Execution mode: vectorized, llap - Reduce Operator Tree: - Group By Operator - aggregations: sum(VALUE._col0) - keys: KEY._col0 (type: int) - mode: mergepartial - outputColumnNames: _col0, _col1 - Statistics: Num rows: 121 Data size: 1452 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 (type: int) - sort order: + - Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 121 Data size: 1452 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col1 (type: double) - Reducer 3 - Execution mode: llap - Reduce Operator Tree: - Merge Join Operator - condition map: - Inner Join 0 to 1 - keys: - 0 _col0 (type: int) - 1 _col0 (type: int) - outputColumnNames: _col0, _col1, _col3 - Statistics: Num rows: 204 Data size: 21012 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: _col0 (type: int), _col1 (type: double), _col3 (type: string) - outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 204 Data size: 21012 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: Num rows: 204 Data size: 21012 Basic stats: COMPLETE Column stats: COMPLETE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - ListSink - -PREHOOK: query: explain -select a.key, a.value, b.value -from (select key, sum(substr(srcbucket_mapjoin.value,5)) as value from srcbucket_mapjoin GROUP BY srcbucket_mapjoin.key) a - join tab_part b on a.key = b.key -PREHOOK: type: QUERY -POSTHOOK: query: explain -select a.key, a.value, b.value -from (select key, sum(substr(srcbucket_mapjoin.value,5)) as value from srcbucket_mapjoin GROUP BY srcbucket_mapjoin.key) a - join tab_part b on a.key = b.key -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-1 - Tez -#### A masked pattern was here #### - Edges: - Map 3 <- Reducer 2 (CUSTOM_EDGE) - Reducer 2 <- Map 1 (SIMPLE_EDGE) -#### A masked pattern was here #### - Vertices: - Map 1 - Map Operator Tree: - TableScan - alias: srcbucket_mapjoin - Statistics: Num rows: 242 Data size: 22990 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator - predicate: key is not null (type: boolean) - Statistics: Num rows: 242 Data size: 22990 Basic 
stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: key (type: int), substr(value, 5) (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 242 Data size: 22990 Basic stats: COMPLETE Column stats: COMPLETE - Group By Operator - aggregations: sum(_col1) - keys: _col0 (type: int) - mode: hash - outputColumnNames: _col0, _col1 - Statistics: Num rows: 121 Data size: 1452 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 (type: int) - sort order: + - Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 121 Data size: 1452 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col1 (type: double) - Execution mode: llap - LLAP IO: no inputs - Map 3 - Map Operator Tree: - TableScan - alias: b - Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator - predicate: key is not null (type: boolean) - Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: key (type: int), value (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE - Map Join Operator - condition map: - Inner Join 0 to 1 - keys: - 0 _col0 (type: int) - 1 _col0 (type: int) - outputColumnNames: _col0, _col1, _col3 - input vertices: - 0 Reducer 2 - Statistics: Num rows: 204 Data size: 21012 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: _col0 (type: int), _col1 (type: double), _col3 (type: string) - outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 204 Data size: 21012 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: Num rows: 204 Data size: 21012 Basic stats: COMPLETE Column stats: COMPLETE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - Execution mode: vectorized, llap - LLAP IO: no inputs - Reducer 2 - Execution mode: vectorized, llap - Reduce Operator Tree: - Group By Operator - aggregations: sum(VALUE._col0) - keys: KEY._col0 (type: int) - mode: mergepartial - outputColumnNames: _col0, _col1 - Statistics: Num rows: 121 Data size: 1452 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 (type: int) - sort order: + - Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 121 Data size: 1452 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col1 (type: double) - - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - ListSink - -PREHOOK: query: explain -select a.key, a.value, b.value -from (select key, sum(substr(srcbucket_mapjoin.value,5)) as value from srcbucket_mapjoin GROUP BY srcbucket_mapjoin.key) a -join tab_part b on a.key = b.key -PREHOOK: type: QUERY -POSTHOOK: query: explain -select a.key, a.value, b.value -from (select key, sum(substr(srcbucket_mapjoin.value,5)) as value from srcbucket_mapjoin GROUP BY srcbucket_mapjoin.key) a -join tab_part b on a.key = b.key -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-1 - Tez -#### A masked pattern was here #### - Edges: - Reducer 2 <- Map 1 (SIMPLE_EDGE) - Reducer 3 <- Map 4 (SIMPLE_EDGE), Reducer 2 (ONE_TO_ONE_EDGE) -#### A masked pattern was here 
#### - Vertices: - Map 1 - Map Operator Tree: - TableScan - alias: srcbucket_mapjoin - Statistics: Num rows: 242 Data size: 22990 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator - predicate: key is not null (type: boolean) - Statistics: Num rows: 242 Data size: 22990 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: key (type: int), substr(value, 5) (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 242 Data size: 22990 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 (type: int) - sort order: + - Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 242 Data size: 22990 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col1 (type: string) - Execution mode: vectorized, llap - LLAP IO: no inputs - Map 4 - Map Operator Tree: - TableScan - alias: b - Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator - predicate: key is not null (type: boolean) - Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: key (type: int), value (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 (type: int) - sort order: + - Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col1 (type: string) - Execution mode: vectorized, llap - LLAP IO: no inputs - Reducer 2 - Execution mode: llap - Reduce Operator Tree: - Group By Operator - aggregations: sum(VALUE._col0) - keys: KEY._col0 (type: int) - mode: complete - outputColumnNames: _col0, _col1 - Statistics: Num rows: 153 Data size: 1836 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 (type: int) - sort order: + - Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 153 Data size: 1836 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col1 (type: double) - Reducer 3 - Execution mode: llap - Reduce Operator Tree: - Merge Join Operator - condition map: - Inner Join 0 to 1 - keys: - 0 _col0 (type: int) - 1 _col0 (type: int) - outputColumnNames: _col0, _col1, _col3 - Statistics: Num rows: 258 Data size: 26574 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: _col0 (type: int), _col1 (type: double), _col3 (type: string) - outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 258 Data size: 26574 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: Num rows: 258 Data size: 26574 Basic stats: COMPLETE Column stats: COMPLETE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - ListSink - -PREHOOK: query: explain -select a.key, a.value, b.value -from (select key, sum(substr(srcbucket_mapjoin.value,5)) as value from srcbucket_mapjoin GROUP BY srcbucket_mapjoin.key) a - join tab_part b on a.key = b.key -PREHOOK: type: QUERY -POSTHOOK: query: explain -select a.key, a.value, b.value -from (select key, sum(substr(srcbucket_mapjoin.value,5)) as value from srcbucket_mapjoin GROUP BY 
srcbucket_mapjoin.key) a - join tab_part b on a.key = b.key -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-1 - Tez -#### A masked pattern was here #### - Edges: - Map 3 <- Reducer 2 (CUSTOM_EDGE) - Reducer 2 <- Map 1 (SIMPLE_EDGE) -#### A masked pattern was here #### - Vertices: - Map 1 - Map Operator Tree: - TableScan - alias: srcbucket_mapjoin - Statistics: Num rows: 242 Data size: 22990 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator - predicate: key is not null (type: boolean) - Statistics: Num rows: 242 Data size: 22990 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: key (type: int), substr(value, 5) (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 242 Data size: 22990 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 (type: int) - sort order: + - Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 242 Data size: 22990 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col1 (type: string) - Execution mode: vectorized, llap - LLAP IO: no inputs - Map 3 - Map Operator Tree: - TableScan - alias: b - Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator - predicate: key is not null (type: boolean) - Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: key (type: int), value (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE - Map Join Operator - condition map: - Inner Join 0 to 1 - keys: - 0 _col0 (type: int) - 1 _col0 (type: int) - outputColumnNames: _col0, _col1, _col3 - input vertices: - 0 Reducer 2 - Statistics: Num rows: 258 Data size: 26574 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: _col0 (type: int), _col1 (type: double), _col3 (type: string) - outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 258 Data size: 26574 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: Num rows: 258 Data size: 26574 Basic stats: COMPLETE Column stats: COMPLETE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - Execution mode: vectorized, llap - LLAP IO: no inputs - Reducer 2 - Execution mode: llap - Reduce Operator Tree: - Group By Operator - aggregations: sum(VALUE._col0) - keys: KEY._col0 (type: int) - mode: complete - outputColumnNames: _col0, _col1 - Statistics: Num rows: 153 Data size: 1836 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 (type: int) - sort order: + - Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 153 Data size: 1836 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col1 (type: double) - - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - ListSink - -PREHOOK: query: explain -select a.key, a.value, b.value -from tab a join tab_part b on a.value = b.value -PREHOOK: type: QUERY -POSTHOOK: query: explain -select a.key, a.value, b.value -from tab a join tab_part b on a.value = b.value -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - 
-STAGE PLANS: - Stage: Stage-1 - Tez -#### A masked pattern was here #### - Edges: - Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 3 (SIMPLE_EDGE) -#### A masked pattern was here #### - Vertices: - Map 1 - Map Operator Tree: - TableScan - alias: a - Statistics: Num rows: 242 Data size: 22990 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator - predicate: value is not null (type: boolean) - Statistics: Num rows: 242 Data size: 22990 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: key (type: int), value (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 242 Data size: 22990 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col1 (type: string) - sort order: + - Map-reduce partition columns: _col1 (type: string) - Statistics: Num rows: 242 Data size: 22990 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col0 (type: int) - Execution mode: vectorized, llap - LLAP IO: no inputs - Map 3 - Map Operator Tree: - TableScan - alias: b - Statistics: Num rows: 500 Data size: 45500 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator - predicate: value is not null (type: boolean) - Statistics: Num rows: 500 Data size: 45500 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: value (type: string) - outputColumnNames: _col0 - Statistics: Num rows: 500 Data size: 45500 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 (type: string) - sort order: + - Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 500 Data size: 45500 Basic stats: COMPLETE Column stats: COMPLETE - Execution mode: vectorized, llap - LLAP IO: no inputs - Reducer 2 - Execution mode: llap - Reduce Operator Tree: - Merge Join Operator - condition map: - Inner Join 0 to 1 - keys: - 0 _col1 (type: string) - 1 _col0 (type: string) - outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 394 Data size: 73284 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: Num rows: 394 Data size: 73284 Basic stats: COMPLETE Column stats: COMPLETE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - ListSink - -PREHOOK: query: explain -select a.key, a.value, b.value -from tab a join tab_part b on a.value = b.value -PREHOOK: type: QUERY -POSTHOOK: query: explain -select a.key, a.value, b.value -from tab a join tab_part b on a.value = b.value -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-1 - Tez -#### A masked pattern was here #### - Edges: - Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 3 (SIMPLE_EDGE) -#### A masked pattern was here #### - Vertices: - Map 1 - Map Operator Tree: - TableScan - alias: a - Statistics: Num rows: 242 Data size: 22990 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator - predicate: value is not null (type: boolean) - Statistics: Num rows: 242 Data size: 22990 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: key (type: int), value (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 242 Data size: 22990 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col1 
(type: string) - sort order: + - Map-reduce partition columns: _col1 (type: string) - Statistics: Num rows: 242 Data size: 22990 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col0 (type: int) - Execution mode: vectorized, llap - LLAP IO: no inputs - Map 3 - Map Operator Tree: - TableScan - alias: b - Statistics: Num rows: 500 Data size: 45500 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator - predicate: value is not null (type: boolean) - Statistics: Num rows: 500 Data size: 45500 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: value (type: string) - outputColumnNames: _col0 - Statistics: Num rows: 500 Data size: 45500 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 (type: string) - sort order: + - Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 500 Data size: 45500 Basic stats: COMPLETE Column stats: COMPLETE - Execution mode: vectorized, llap - LLAP IO: no inputs - Reducer 2 - Execution mode: llap - Reduce Operator Tree: - Merge Join Operator - condition map: - Inner Join 0 to 1 - keys: - 0 _col1 (type: string) - 1 _col0 (type: string) - outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 394 Data size: 73284 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: Num rows: 394 Data size: 73284 Basic stats: COMPLETE Column stats: COMPLETE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - ListSink - -PREHOOK: query: CREATE TABLE tab1(key int, value string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@tab1 -POSTHOOK: query: CREATE TABLE tab1(key int, value string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@tab1 -PREHOOK: query: insert overwrite table tab1 -select key,value from srcbucket_mapjoin -PREHOOK: type: QUERY -PREHOOK: Input: default@srcbucket_mapjoin -PREHOOK: Input: default@srcbucket_mapjoin@ds=2008-04-08 -PREHOOK: Output: default@tab1 -POSTHOOK: query: insert overwrite table tab1 -select key,value from srcbucket_mapjoin -POSTHOOK: type: QUERY -POSTHOOK: Input: default@srcbucket_mapjoin -POSTHOOK: Input: default@srcbucket_mapjoin@ds=2008-04-08 -POSTHOOK: Output: default@tab1 -POSTHOOK: Lineage: tab1.key SIMPLE [(srcbucket_mapjoin)srcbucket_mapjoin.FieldSchema(name:key, type:int, comment:null), ] -POSTHOOK: Lineage: tab1.value SIMPLE [(srcbucket_mapjoin)srcbucket_mapjoin.FieldSchema(name:value, type:string, comment:null), ] -PREHOOK: query: explain -select a.key, a.value, b.value -from tab1 a join tab_part b on a.key = b.key -PREHOOK: type: QUERY -POSTHOOK: query: explain -select a.key, a.value, b.value -from tab1 a join tab_part b on a.key = b.key -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-1 - Tez -#### A masked pattern was here #### - Edges: - Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 3 (SIMPLE_EDGE) -#### A masked pattern was here #### - Vertices: - Map 1 - Map Operator Tree: - TableScan - alias: a - Statistics: Num rows: 242 Data size: 22990 Basic stats: COMPLETE Column stats: 
COMPLETE - Filter Operator - predicate: key is not null (type: boolean) - Statistics: Num rows: 242 Data size: 22990 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: key (type: int), value (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 242 Data size: 22990 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 (type: int) - sort order: + - Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 242 Data size: 22990 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col1 (type: string) - Execution mode: vectorized, llap - LLAP IO: no inputs - Map 3 - Map Operator Tree: - TableScan - alias: b - Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator - predicate: key is not null (type: boolean) - Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: key (type: int), value (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 (type: int) - sort order: + - Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col1 (type: string) - Execution mode: vectorized, llap - LLAP IO: no inputs - Reducer 2 - Execution mode: llap - Reduce Operator Tree: - Merge Join Operator - condition map: - Inner Join 0 to 1 - keys: - 0 _col0 (type: int) - 1 _col0 (type: int) - outputColumnNames: _col0, _col1, _col3 - Statistics: Num rows: 408 Data size: 75888 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col3 (type: string) - outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 408 Data size: 75888 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: Num rows: 408 Data size: 75888 Basic stats: COMPLETE Column stats: COMPLETE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - ListSink - -PREHOOK: query: explain -select a.key, a.value, b.value -from tab1 a join tab_part b on a.key = b.key -PREHOOK: type: QUERY -POSTHOOK: query: explain -select a.key, a.value, b.value -from tab1 a join tab_part b on a.key = b.key -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-1 - Tez -#### A masked pattern was here #### - Edges: - Map 2 <- Map 1 (CUSTOM_EDGE) -#### A masked pattern was here #### - Vertices: - Map 1 - Map Operator Tree: - TableScan - alias: a - Statistics: Num rows: 242 Data size: 22990 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator - predicate: key is not null (type: boolean) - Statistics: Num rows: 242 Data size: 22990 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: key (type: int), value (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 242 Data size: 22990 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 (type: int) - sort order: + - Map-reduce partition columns: _col0 (type: int) - 
Statistics: Num rows: 242 Data size: 22990 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col1 (type: string) - Execution mode: vectorized, llap - LLAP IO: no inputs - Map 2 - Map Operator Tree: - TableScan - alias: b - Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator - predicate: key is not null (type: boolean) - Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: key (type: int), value (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE - Map Join Operator - condition map: - Inner Join 0 to 1 - keys: - 0 _col0 (type: int) - 1 _col0 (type: int) - outputColumnNames: _col0, _col1, _col3 - input vertices: - 0 Map 1 - Statistics: Num rows: 408 Data size: 75888 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col3 (type: string) - outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 408 Data size: 75888 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: Num rows: 408 Data size: 75888 Basic stats: COMPLETE Column stats: COMPLETE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - Execution mode: vectorized, llap - LLAP IO: no inputs - - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - ListSink - -PREHOOK: query: explain select a.key, b.key from tab_part a join tab_part c on a.key = c.key join tab_part b on a.value = b.value -PREHOOK: type: QUERY -POSTHOOK: query: explain select a.key, b.key from tab_part a join tab_part c on a.key = c.key join tab_part b on a.value = b.value -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-1 - Tez -#### A masked pattern was here #### - Edges: - Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 3 (SIMPLE_EDGE), Map 4 (BROADCAST_EDGE) -#### A masked pattern was here #### - Vertices: - Map 1 - Map Operator Tree: - TableScan - alias: a - Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator - predicate: (key is not null and value is not null) (type: boolean) - Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: key (type: int), value (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col1 (type: string) - sort order: + - Map-reduce partition columns: _col1 (type: string) - Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col0 (type: int) - Execution mode: vectorized, llap - LLAP IO: no inputs - Map 3 - Map Operator Tree: - TableScan - alias: b - Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator - predicate: value is not null (type: boolean) - Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: key (type: int), value (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE 
Column stats: COMPLETE - Reduce Output Operator - key expressions: _col1 (type: string) - sort order: + - Map-reduce partition columns: _col1 (type: string) - Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col0 (type: int) - Execution mode: vectorized, llap - LLAP IO: no inputs - Map 4 - Map Operator Tree: - TableScan - alias: c - Statistics: Num rows: 500 Data size: 2000 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator - predicate: key is not null (type: boolean) - Statistics: Num rows: 500 Data size: 2000 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: key (type: int) - outputColumnNames: _col0 - Statistics: Num rows: 500 Data size: 2000 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 (type: int) - sort order: + - Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 500 Data size: 2000 Basic stats: COMPLETE Column stats: COMPLETE - Execution mode: vectorized, llap - LLAP IO: no inputs - Reducer 2 - Execution mode: llap - Reduce Operator Tree: - Merge Join Operator - condition map: - Inner Join 0 to 1 - keys: - 0 _col1 (type: string) - 1 _col1 (type: string) - outputColumnNames: _col0, _col2 - Statistics: Num rows: 814 Data size: 6512 Basic stats: COMPLETE Column stats: COMPLETE - Map Join Operator - condition map: - Inner Join 0 to 1 - keys: - 0 _col0 (type: int) - 1 _col0 (type: int) - outputColumnNames: _col0, _col2 - input vertices: - 1 Map 4 - Statistics: Num rows: 1375 Data size: 11000 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: _col0 (type: int), _col2 (type: int) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 1375 Data size: 11000 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: Num rows: 1375 Data size: 11000 Basic stats: COMPLETE Column stats: COMPLETE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - ListSink - -PREHOOK: query: explain select a.key, b.key from tab_part a join tab_part c on a.key = c.key join tab_part b on a.value = b.value -PREHOOK: type: QUERY -POSTHOOK: query: explain select a.key, b.key from tab_part a join tab_part c on a.key = c.key join tab_part b on a.value = b.value -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-1 - Tez -#### A masked pattern was here #### - Edges: - Map 1 <- Map 2 (CUSTOM_EDGE), Map 3 (BROADCAST_EDGE) -#### A masked pattern was here #### - Vertices: - Map 1 - Map Operator Tree: - TableScan - alias: a - Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator - predicate: (key is not null and value is not null) (type: boolean) - Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: key (type: int), value (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE - Map Join Operator - condition map: - Inner Join 0 to 1 - keys: - 0 _col1 (type: string) - 1 _col1 (type: string) - outputColumnNames: _col0, _col2 - input vertices: - 1 Map 2 - Statistics: Num rows: 814 
Data size: 6512 Basic stats: COMPLETE Column stats: COMPLETE - Map Join Operator - condition map: - Inner Join 0 to 1 - keys: - 0 _col0 (type: int) - 1 _col0 (type: int) - outputColumnNames: _col0, _col2 - input vertices: - 1 Map 3 - Statistics: Num rows: 1375 Data size: 11000 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: _col0 (type: int), _col2 (type: int) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 1375 Data size: 11000 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: Num rows: 1375 Data size: 11000 Basic stats: COMPLETE Column stats: COMPLETE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - Execution mode: vectorized, llap - LLAP IO: no inputs - Map 2 - Map Operator Tree: - TableScan - alias: b - Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator - predicate: value is not null (type: boolean) - Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: key (type: int), value (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col1 (type: string) - sort order: + - Map-reduce partition columns: _col1 (type: string) - Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col0 (type: int) - Execution mode: vectorized, llap - LLAP IO: no inputs - Map 3 - Map Operator Tree: - TableScan - alias: c - Statistics: Num rows: 500 Data size: 2000 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator - predicate: key is not null (type: boolean) - Statistics: Num rows: 500 Data size: 2000 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: key (type: int) - outputColumnNames: _col0 - Statistics: Num rows: 500 Data size: 2000 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 (type: int) - sort order: + - Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 500 Data size: 2000 Basic stats: COMPLETE Column stats: COMPLETE - Execution mode: vectorized, llap - LLAP IO: no inputs - - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - ListSink - -PREHOOK: query: explain -select a.key, a.value, b.value -from tab a join tab_part b on a.key = b.key and a.ds = b.ds -PREHOOK: type: QUERY -POSTHOOK: query: explain -select a.key, a.value, b.value -from tab a join tab_part b on a.key = b.key and a.ds = b.ds -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-1 - Tez -#### A masked pattern was here #### - Edges: - Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 3 (SIMPLE_EDGE) -#### A masked pattern was here #### - Vertices: - Map 1 - Map Operator Tree: - TableScan - alias: a - Statistics: Num rows: 242 Data size: 67518 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator - predicate: key is not null (type: boolean) - Statistics: Num rows: 242 Data size: 67518 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: key (type: int), value (type: string), ds (type: string) - outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 242 Data 
size: 67518 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 (type: int), _col2 (type: string) - sort order: ++ - Map-reduce partition columns: _col0 (type: int), _col2 (type: string) - Statistics: Num rows: 242 Data size: 67518 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col1 (type: string) - Execution mode: vectorized, llap - LLAP IO: no inputs - Map 3 - Map Operator Tree: - TableScan - alias: b - Statistics: Num rows: 500 Data size: 139500 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator - predicate: key is not null (type: boolean) - Statistics: Num rows: 500 Data size: 139500 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: key (type: int), value (type: string), ds (type: string) - outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 500 Data size: 139500 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 (type: int), _col2 (type: string) - sort order: ++ - Map-reduce partition columns: _col0 (type: int), _col2 (type: string) - Statistics: Num rows: 500 Data size: 139500 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col1 (type: string) - Select Operator - expressions: _col2 (type: string) - outputColumnNames: _col0 - Statistics: Num rows: 500 Data size: 139500 Basic stats: COMPLETE Column stats: COMPLETE - Group By Operator - keys: _col0 (type: string) - mode: hash - outputColumnNames: _col0 - Statistics: Num rows: 250 Data size: 69750 Basic stats: COMPLETE Column stats: COMPLETE - Dynamic Partitioning Event Operator - Target column: ds (string) - Target Input: a - Partition key expr: ds - Statistics: Num rows: 250 Data size: 69750 Basic stats: COMPLETE Column stats: COMPLETE - Target Vertex: Map 1 - Execution mode: vectorized, llap - LLAP IO: no inputs - Reducer 2 - Execution mode: llap - Reduce Operator Tree: - Merge Join Operator - condition map: - Inner Join 0 to 1 - keys: - 0 _col0 (type: int), _col2 (type: string) - 1 _col0 (type: int), _col2 (type: string) - outputColumnNames: _col0, _col1, _col4 - Statistics: Num rows: 408 Data size: 75888 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col4 (type: string) - outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 408 Data size: 75888 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: Num rows: 408 Data size: 75888 Basic stats: COMPLETE Column stats: COMPLETE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - ListSink - -PREHOOK: query: explain -select a.key, a.value, b.value -from tab a join tab_part b on a.key = b.key and a.ds = b.ds -PREHOOK: type: QUERY -POSTHOOK: query: explain -select a.key, a.value, b.value -from tab a join tab_part b on a.key = b.key and a.ds = b.ds -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-1 - Tez -#### A masked pattern was here #### - Edges: - Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 3 (SIMPLE_EDGE) -#### A masked pattern was here #### - Vertices: - Map 1 - Map Operator Tree: - TableScan - alias: a - Statistics: Num rows: 242 Data size: 67518 Basic stats: COMPLETE Column stats: 
COMPLETE - Filter Operator - predicate: key is not null (type: boolean) - Statistics: Num rows: 242 Data size: 67518 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: key (type: int), value (type: string), ds (type: string) - outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 242 Data size: 67518 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 (type: int), _col2 (type: string) - sort order: ++ - Map-reduce partition columns: _col0 (type: int), _col2 (type: string) - Statistics: Num rows: 242 Data size: 67518 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col1 (type: string) - Execution mode: vectorized, llap - LLAP IO: no inputs - Map 3 - Map Operator Tree: - TableScan - alias: b - Statistics: Num rows: 500 Data size: 139500 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator - predicate: key is not null (type: boolean) - Statistics: Num rows: 500 Data size: 139500 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: key (type: int), value (type: string), ds (type: string) - outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 500 Data size: 139500 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 (type: int), _col2 (type: string) - sort order: ++ - Map-reduce partition columns: _col0 (type: int), _col2 (type: string) - Statistics: Num rows: 500 Data size: 139500 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col1 (type: string) - Select Operator - expressions: _col2 (type: string) - outputColumnNames: _col0 - Statistics: Num rows: 500 Data size: 139500 Basic stats: COMPLETE Column stats: COMPLETE - Group By Operator - keys: _col0 (type: string) - mode: hash - outputColumnNames: _col0 - Statistics: Num rows: 250 Data size: 69750 Basic stats: COMPLETE Column stats: COMPLETE - Dynamic Partitioning Event Operator - Target column: ds (string) - Target Input: a - Partition key expr: ds - Statistics: Num rows: 250 Data size: 69750 Basic stats: COMPLETE Column stats: COMPLETE - Target Vertex: Map 1 - Execution mode: vectorized, llap - LLAP IO: no inputs - Reducer 2 - Execution mode: llap - Reduce Operator Tree: - Merge Join Operator - condition map: - Inner Join 0 to 1 - keys: - 0 _col0 (type: int), _col2 (type: string) - 1 _col0 (type: int), _col2 (type: string) - outputColumnNames: _col0, _col1, _col4 - Statistics: Num rows: 408 Data size: 75888 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col4 (type: string) - outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 408 Data size: 75888 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: Num rows: 408 Data size: 75888 Basic stats: COMPLETE Column stats: COMPLETE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - ListSink - -PREHOOK: query: explain select a.key, a.value, b.value - from tab a join tab_part b on a.key = b.key and a.value = b.value -PREHOOK: type: QUERY -POSTHOOK: query: explain select a.key, a.value, b.value - from tab a join tab_part b on a.key = b.key and a.value = b.value -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on 
stages: Stage-1 - -STAGE PLANS: - Stage: Stage-1 - Tez -#### A masked pattern was here #### - Edges: - Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 3 (SIMPLE_EDGE) -#### A masked pattern was here #### - Vertices: - Map 1 - Map Operator Tree: - TableScan - alias: a - Statistics: Num rows: 242 Data size: 22990 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator - predicate: (key is not null and value is not null) (type: boolean) - Statistics: Num rows: 242 Data size: 22990 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: key (type: int), value (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 242 Data size: 22990 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 (type: int), _col1 (type: string) - sort order: ++ - Map-reduce partition columns: _col0 (type: int), _col1 (type: string) - Statistics: Num rows: 242 Data size: 22990 Basic stats: COMPLETE Column stats: COMPLETE - Execution mode: vectorized, llap - LLAP IO: no inputs - Map 3 - Map Operator Tree: - TableScan - alias: b - Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator - predicate: (key is not null and value is not null) (type: boolean) - Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: key (type: int), value (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 (type: int), _col1 (type: string) - sort order: ++ - Map-reduce partition columns: _col0 (type: int), _col1 (type: string) - Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE - Execution mode: vectorized, llap - LLAP IO: no inputs - Reducer 2 - Execution mode: llap - Reduce Operator Tree: - Merge Join Operator - condition map: - Inner Join 0 to 1 - keys: - 0 _col0 (type: int), _col1 (type: string) - 1 _col0 (type: int), _col1 (type: string) - outputColumnNames: _col0, _col1, _col3 - Statistics: Num rows: 394 Data size: 73284 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col3 (type: string) - outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 394 Data size: 73284 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: Num rows: 394 Data size: 73284 Basic stats: COMPLETE Column stats: COMPLETE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - ListSink - -PREHOOK: query: select a.key, a.value, b.value -from tab a join tab_part b on a.key = b.key and a.value = b.value -order by a.key, a.value, b.value -PREHOOK: type: QUERY -PREHOOK: Input: default@tab -PREHOOK: Input: default@tab@ds=2008-04-08 -PREHOOK: Input: default@tab_part -PREHOOK: Input: default@tab_part@ds=2008-04-08 -#### A masked pattern was here #### -POSTHOOK: query: select a.key, a.value, b.value -from tab a join tab_part b on a.key = b.key and a.value = b.value -order by a.key, a.value, b.value -POSTHOOK: type: QUERY -POSTHOOK: Input: default@tab -POSTHOOK: Input: default@tab@ds=2008-04-08 -POSTHOOK: Input: default@tab_part -POSTHOOK: Input: 
default@tab_part@ds=2008-04-08 -#### A masked pattern was here #### -0 val_0 val_0 -0 val_0 val_0 -0 val_0 val_0 -0 val_0 val_0 -0 val_0 val_0 -0 val_0 val_0 -0 val_0 val_0 -0 val_0 val_0 -0 val_0 val_0 -2 val_2 val_2 -4 val_4 val_4 -8 val_8 val_8 -11 val_11 val_11 -15 val_15 val_15 -15 val_15 val_15 -15 val_15 val_15 -15 val_15 val_15 -17 val_17 val_17 -19 val_19 val_19 -20 val_20 val_20 -24 val_24 val_24 -24 val_24 val_24 -24 val_24 val_24 -24 val_24 val_24 -26 val_26 val_26 -26 val_26 val_26 -26 val_26 val_26 -26 val_26 val_26 -28 val_28 val_28 -33 val_33 val_33 -35 val_35 val_35 -35 val_35 val_35 -35 val_35 val_35 -35 val_35 val_35 -35 val_35 val_35 -35 val_35 val_35 -35 val_35 val_35 -35 val_35 val_35 -35 val_35 val_35 -37 val_37 val_37 -37 val_37 val_37 -37 val_37 val_37 -37 val_37 val_37 -42 val_42 val_42 -42 val_42 val_42 -42 val_42 val_42 -42 val_42 val_42 -44 val_44 val_44 -51 val_51 val_51 -51 val_51 val_51 -51 val_51 val_51 -51 val_51 val_51 -53 val_53 val_53 -57 val_57 val_57 -64 val_64 val_64 -66 val_66 val_66 -77 val_77 val_77 -80 val_80 val_80 -82 val_82 val_82 -84 val_84 val_84 -84 val_84 val_84 -84 val_84 val_84 -84 val_84 val_84 -86 val_86 val_86 -95 val_95 val_95 -95 val_95 val_95 -95 val_95 val_95 -95 val_95 val_95 -97 val_97 val_97 -97 val_97 val_97 -97 val_97 val_97 -97 val_97 val_97 -103 val_103 val_103 -103 val_103 val_103 -103 val_103 val_103 -103 val_103 val_103 -105 val_105 val_105 -114 val_114 val_114 -116 val_116 val_116 -118 val_118 val_118 -118 val_118 val_118 -118 val_118 val_118 -118 val_118 val_118 -125 val_125 val_125 -125 val_125 val_125 -125 val_125 val_125 -125 val_125 val_125 -129 val_129 val_129 -129 val_129 val_129 -129 val_129 val_129 -129 val_129 val_129 -134 val_134 val_134 -134 val_134 val_134 -134 val_134 val_134 -134 val_134 val_134 -136 val_136 val_136 -138 val_138 val_138 -138 val_138 val_138 -138 val_138 val_138 -138 val_138 val_138 -138 val_138 val_138 -138 val_138 val_138 -138 val_138 val_138 -138 val_138 val_138 -138 val_138 val_138 -138 val_138 val_138 -138 val_138 val_138 -138 val_138 val_138 -138 val_138 val_138 -138 val_138 val_138 -138 val_138 val_138 -138 val_138 val_138 -143 val_143 val_143 -145 val_145 val_145 -149 val_149 val_149 -149 val_149 val_149 -149 val_149 val_149 -149 val_149 val_149 -150 val_150 val_150 -152 val_152 val_152 -152 val_152 val_152 -152 val_152 val_152 -152 val_152 val_152 -156 val_156 val_156 -158 val_158 val_158 -163 val_163 val_163 -165 val_165 val_165 -165 val_165 val_165 -165 val_165 val_165 -165 val_165 val_165 -167 val_167 val_167 -167 val_167 val_167 -167 val_167 val_167 -167 val_167 val_167 -167 val_167 val_167 -167 val_167 val_167 -167 val_167 val_167 -167 val_167 val_167 -167 val_167 val_167 -169 val_169 val_169 -169 val_169 val_169 -169 val_169 val_169 -169 val_169 val_169 -169 val_169 val_169 -169 val_169 val_169 -169 val_169 val_169 -169 val_169 val_169 -169 val_169 val_169 -169 val_169 val_169 -169 val_169 val_169 -169 val_169 val_169 -169 val_169 val_169 -169 val_169 val_169 -169 val_169 val_169 -169 val_169 val_169 -170 val_170 val_170 -172 val_172 val_172 -172 val_172 val_172 -172 val_172 val_172 -172 val_172 val_172 -174 val_174 val_174 -174 val_174 val_174 -174 val_174 val_174 -174 val_174 val_174 -176 val_176 val_176 -176 val_176 val_176 -176 val_176 val_176 -176 val_176 val_176 -178 val_178 val_178 -181 val_181 val_181 -183 val_183 val_183 -187 val_187 val_187 -187 val_187 val_187 -187 val_187 val_187 -187 val_187 val_187 -187 val_187 val_187 -187 val_187 val_187 -187 val_187 val_187 
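For context: the earlier pair of plans above for the predicate "a.key = b.key and a.ds = b.ds" include a Dynamic Partitioning Event Operator with Target Vertex: Map 1, meaning the partitions of table a are pruned at runtime from the ds values scanned out of b. The exact configuration used by the test is not shown in this diff, so the following is only a hedged sketch of the toggle usually involved:

    -- assumed setting; the test's real value is not visible in this diff
    set hive.tez.dynamic.partition.pruning=true;
    explain
    select a.key, a.value, b.value
    from tab a join tab_part b on a.key = b.key and a.ds = b.ds;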
-187 val_187 val_187 -187 val_187 val_187 -189 val_189 val_189 -190 val_190 val_190 -192 val_192 val_192 -194 val_194 val_194 -196 val_196 val_196 -200 val_200 val_200 -200 val_200 val_200 -200 val_200 val_200 -200 val_200 val_200 -202 val_202 val_202 -208 val_208 val_208 -208 val_208 val_208 -208 val_208 val_208 -208 val_208 val_208 -208 val_208 val_208 -208 val_208 val_208 -208 val_208 val_208 -208 val_208 val_208 -208 val_208 val_208 -213 val_213 val_213 -213 val_213 val_213 -213 val_213 val_213 -213 val_213 val_213 -217 val_217 val_217 -217 val_217 val_217 -217 val_217 val_217 -217 val_217 val_217 -219 val_219 val_219 -219 val_219 val_219 -219 val_219 val_219 -219 val_219 val_219 -222 val_222 val_222 -224 val_224 val_224 -224 val_224 val_224 -224 val_224 val_224 -224 val_224 val_224 -226 val_226 val_226 -228 val_228 val_228 -233 val_233 val_233 -233 val_233 val_233 -233 val_233 val_233 -233 val_233 val_233 -235 val_235 val_235 -237 val_237 val_237 -237 val_237 val_237 -237 val_237 val_237 -237 val_237 val_237 -239 val_239 val_239 -239 val_239 val_239 -239 val_239 val_239 -239 val_239 val_239 -242 val_242 val_242 -242 val_242 val_242 -242 val_242 val_242 -242 val_242 val_242 -244 val_244 val_244 -248 val_248 val_248 -255 val_255 val_255 -255 val_255 val_255 -255 val_255 val_255 -255 val_255 val_255 -257 val_257 val_257 -260 val_260 val_260 -262 val_262 val_262 -266 val_266 val_266 -273 val_273 val_273 -273 val_273 val_273 -273 val_273 val_273 -273 val_273 val_273 -273 val_273 val_273 -273 val_273 val_273 -273 val_273 val_273 -273 val_273 val_273 -273 val_273 val_273 -275 val_275 val_275 -277 val_277 val_277 -277 val_277 val_277 -277 val_277 val_277 -277 val_277 val_277 -277 val_277 val_277 -277 val_277 val_277 -277 val_277 val_277 -277 val_277 val_277 -277 val_277 val_277 -277 val_277 val_277 -277 val_277 val_277 -277 val_277 val_277 -277 val_277 val_277 -277 val_277 val_277 -277 val_277 val_277 -277 val_277 val_277 -280 val_280 val_280 -280 val_280 val_280 -280 val_280 val_280 -280 val_280 val_280 -282 val_282 val_282 -282 val_282 val_282 -282 val_282 val_282 -282 val_282 val_282 -284 val_284 val_284 -286 val_286 val_286 -288 val_288 val_288 -288 val_288 val_288 -288 val_288 val_288 -288 val_288 val_288 -291 val_291 val_291 -305 val_305 val_305 -307 val_307 val_307 -307 val_307 val_307 -307 val_307 val_307 -307 val_307 val_307 -309 val_309 val_309 -309 val_309 val_309 -309 val_309 val_309 -309 val_309 val_309 -310 val_310 val_310 -316 val_316 val_316 -316 val_316 val_316 -316 val_316 val_316 -316 val_316 val_316 -316 val_316 val_316 -316 val_316 val_316 -316 val_316 val_316 -316 val_316 val_316 -316 val_316 val_316 -318 val_318 val_318 -318 val_318 val_318 -318 val_318 val_318 -318 val_318 val_318 -318 val_318 val_318 -318 val_318 val_318 -318 val_318 val_318 -318 val_318 val_318 -318 val_318 val_318 -321 val_321 val_321 -321 val_321 val_321 -321 val_321 val_321 -321 val_321 val_321 -323 val_323 val_323 -325 val_325 val_325 -325 val_325 val_325 -325 val_325 val_325 -325 val_325 val_325 -327 val_327 val_327 -327 val_327 val_327 -327 val_327 val_327 -327 val_327 val_327 -327 val_327 val_327 -327 val_327 val_327 -327 val_327 val_327 -327 val_327 val_327 -327 val_327 val_327 -332 val_332 val_332 -336 val_336 val_336 -338 val_338 val_338 -341 val_341 val_341 -345 val_345 val_345 -356 val_356 val_356 -365 val_365 val_365 -367 val_367 val_367 -367 val_367 val_367 -367 val_367 val_367 -367 val_367 val_367 -369 val_369 val_369 -369 val_369 val_369 -369 val_369 val_369 -369 val_369 val_369 -369 
val_369 val_369 -369 val_369 val_369 -369 val_369 val_369 -369 val_369 val_369 -369 val_369 val_369 -374 val_374 val_374 -378 val_378 val_378 -389 val_389 val_389 -392 val_392 val_392 -394 val_394 val_394 -396 val_396 val_396 -396 val_396 val_396 -396 val_396 val_396 -396 val_396 val_396 -396 val_396 val_396 -396 val_396 val_396 -396 val_396 val_396 -396 val_396 val_396 -396 val_396 val_396 -400 val_400 val_400 -402 val_402 val_402 -404 val_404 val_404 -404 val_404 val_404 -404 val_404 val_404 -404 val_404 val_404 -406 val_406 val_406 -406 val_406 val_406 -406 val_406 val_406 -406 val_406 val_406 -406 val_406 val_406 -406 val_406 val_406 -406 val_406 val_406 -406 val_406 val_406 -406 val_406 val_406 -406 val_406 val_406 -406 val_406 val_406 -406 val_406 val_406 -406 val_406 val_406 -406 val_406 val_406 -406 val_406 val_406 -406 val_406 val_406 -411 val_411 val_411 -413 val_413 val_413 -413 val_413 val_413 -413 val_413 val_413 -413 val_413 val_413 -417 val_417 val_417 -417 val_417 val_417 -417 val_417 val_417 -417 val_417 val_417 -417 val_417 val_417 -417 val_417 val_417 -417 val_417 val_417 -417 val_417 val_417 -417 val_417 val_417 -419 val_419 val_419 -424 val_424 val_424 -424 val_424 val_424 -424 val_424 val_424 -424 val_424 val_424 -431 val_431 val_431 -431 val_431 val_431 -431 val_431 val_431 -431 val_431 val_431 -431 val_431 val_431 -431 val_431 val_431 -431 val_431 val_431 -431 val_431 val_431 -431 val_431 val_431 -435 val_435 val_435 -437 val_437 val_437 -439 val_439 val_439 -439 val_439 val_439 -439 val_439 val_439 -439 val_439 val_439 -444 val_444 val_444 -446 val_446 val_446 -448 val_448 val_448 -453 val_453 val_453 -455 val_455 val_455 -457 val_457 val_457 -459 val_459 val_459 -459 val_459 val_459 -459 val_459 val_459 -459 val_459 val_459 -460 val_460 val_460 -462 val_462 val_462 -462 val_462 val_462 -462 val_462 val_462 -462 val_462 val_462 -466 val_466 val_466 -466 val_466 val_466 -466 val_466 val_466 -466 val_466 val_466 -466 val_466 val_466 -466 val_466 val_466 -466 val_466 val_466 -466 val_466 val_466 -466 val_466 val_466 -468 val_468 val_468 -468 val_468 val_468 -468 val_468 val_468 -468 val_468 val_468 -468 val_468 val_468 -468 val_468 val_468 -468 val_468 val_468 -468 val_468 val_468 -468 val_468 val_468 -468 val_468 val_468 -468 val_468 val_468 -468 val_468 val_468 -468 val_468 val_468 -468 val_468 val_468 -468 val_468 val_468 -468 val_468 val_468 -475 val_475 val_475 -477 val_477 val_477 -479 val_479 val_479 -480 val_480 val_480 -480 val_480 val_480 -480 val_480 val_480 -480 val_480 val_480 -480 val_480 val_480 -480 val_480 val_480 -480 val_480 val_480 -480 val_480 val_480 -480 val_480 val_480 -482 val_482 val_482 -484 val_484 val_484 -491 val_491 val_491 -493 val_493 val_493 -495 val_495 val_495 -497 val_497 val_497 -PREHOOK: query: explain select a.key, a.value, b.value - from tab a join tab_part b on a.key = b.key and a.value = b.value -PREHOOK: type: QUERY -POSTHOOK: query: explain select a.key, a.value, b.value - from tab a join tab_part b on a.key = b.key and a.value = b.value -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-1 - Tez -#### A masked pattern was here #### - Edges: - Map 2 <- Map 1 (CUSTOM_EDGE) -#### A masked pattern was here #### - Vertices: - Map 1 - Map Operator Tree: - TableScan - alias: a - Statistics: Num rows: 242 Data size: 22990 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator - predicate: (key is not null and value is not null) (type: 
boolean) - Statistics: Num rows: 242 Data size: 22990 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: key (type: int), value (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 242 Data size: 22990 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 (type: int), _col1 (type: string) - sort order: ++ - Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 242 Data size: 22990 Basic stats: COMPLETE Column stats: COMPLETE - Execution mode: vectorized, llap - LLAP IO: no inputs - Map 2 - Map Operator Tree: - TableScan - alias: b - Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator - predicate: (key is not null and value is not null) (type: boolean) - Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: key (type: int), value (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE - Map Join Operator - condition map: - Inner Join 0 to 1 - keys: - 0 _col0 (type: int), _col1 (type: string) - 1 _col0 (type: int), _col1 (type: string) - outputColumnNames: _col0, _col1, _col3 - input vertices: - 0 Map 1 - Statistics: Num rows: 394 Data size: 73284 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col3 (type: string) - outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 394 Data size: 73284 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: Num rows: 394 Data size: 73284 Basic stats: COMPLETE Column stats: COMPLETE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - Execution mode: vectorized, llap - LLAP IO: no inputs - - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - ListSink - -PREHOOK: query: select a.key, a.value, b.value -from tab a join tab_part b on a.key = b.key and a.value = b.value -order by a.key, a.value, b.value -PREHOOK: type: QUERY -PREHOOK: Input: default@tab -PREHOOK: Input: default@tab@ds=2008-04-08 -PREHOOK: Input: default@tab_part -PREHOOK: Input: default@tab_part@ds=2008-04-08 -#### A masked pattern was here #### -POSTHOOK: query: select a.key, a.value, b.value -from tab a join tab_part b on a.key = b.key and a.value = b.value -order by a.key, a.value, b.value -POSTHOOK: type: QUERY -POSTHOOK: Input: default@tab -POSTHOOK: Input: default@tab@ds=2008-04-08 -POSTHOOK: Input: default@tab_part -POSTHOOK: Input: default@tab_part@ds=2008-04-08 -#### A masked pattern was here #### -0 val_0 val_0 -0 val_0 val_0 -0 val_0 val_0 -0 val_0 val_0 -0 val_0 val_0 -0 val_0 val_0 -0 val_0 val_0 -0 val_0 val_0 -0 val_0 val_0 -2 val_2 val_2 -4 val_4 val_4 -8 val_8 val_8 -11 val_11 val_11 -15 val_15 val_15 -15 val_15 val_15 -15 val_15 val_15 -15 val_15 val_15 -17 val_17 val_17 -19 val_19 val_19 -20 val_20 val_20 -24 val_24 val_24 -24 val_24 val_24 -24 val_24 val_24 -24 val_24 val_24 -26 val_26 val_26 -26 val_26 val_26 -26 val_26 val_26 -26 val_26 val_26 -28 val_28 val_28 -33 val_33 val_33 -35 val_35 val_35 -35 val_35 val_35 -35 val_35 val_35 -35 val_35 val_35 -35 val_35 val_35 -35 val_35 val_35 -35 val_35 val_35 -35 val_35 val_35 -35 val_35 val_35 -37 val_37 val_37 -37 val_37 val_37 
-37 val_37 val_37 -37 val_37 val_37 -42 val_42 val_42 -42 val_42 val_42 -42 val_42 val_42 -42 val_42 val_42 -44 val_44 val_44 -51 val_51 val_51 -51 val_51 val_51 -51 val_51 val_51 -51 val_51 val_51 -53 val_53 val_53 -57 val_57 val_57 -64 val_64 val_64 -66 val_66 val_66 -77 val_77 val_77 -80 val_80 val_80 -82 val_82 val_82 -84 val_84 val_84 -84 val_84 val_84 -84 val_84 val_84 -84 val_84 val_84 -86 val_86 val_86 -95 val_95 val_95 -95 val_95 val_95 -95 val_95 val_95 -95 val_95 val_95 -97 val_97 val_97 -97 val_97 val_97 -97 val_97 val_97 -97 val_97 val_97 -103 val_103 val_103 -103 val_103 val_103 -103 val_103 val_103 -103 val_103 val_103 -105 val_105 val_105 -114 val_114 val_114 -116 val_116 val_116 -118 val_118 val_118 -118 val_118 val_118 -118 val_118 val_118 -118 val_118 val_118 -125 val_125 val_125 -125 val_125 val_125 -125 val_125 val_125 -125 val_125 val_125 -129 val_129 val_129 -129 val_129 val_129 -129 val_129 val_129 -129 val_129 val_129 -134 val_134 val_134 -134 val_134 val_134 -134 val_134 val_134 -134 val_134 val_134 -136 val_136 val_136 -138 val_138 val_138 -138 val_138 val_138 -138 val_138 val_138 -138 val_138 val_138 -138 val_138 val_138 -138 val_138 val_138 -138 val_138 val_138 -138 val_138 val_138 -138 val_138 val_138 -138 val_138 val_138 -138 val_138 val_138 -138 val_138 val_138 -138 val_138 val_138 -138 val_138 val_138 -138 val_138 val_138 -138 val_138 val_138 -143 val_143 val_143 -145 val_145 val_145 -149 val_149 val_149 -149 val_149 val_149 -149 val_149 val_149 -149 val_149 val_149 -150 val_150 val_150 -152 val_152 val_152 -152 val_152 val_152 -152 val_152 val_152 -152 val_152 val_152 -156 val_156 val_156 -158 val_158 val_158 -163 val_163 val_163 -165 val_165 val_165 -165 val_165 val_165 -165 val_165 val_165 -165 val_165 val_165 -167 val_167 val_167 -167 val_167 val_167 -167 val_167 val_167 -167 val_167 val_167 -167 val_167 val_167 -167 val_167 val_167 -167 val_167 val_167 -167 val_167 val_167 -167 val_167 val_167 -169 val_169 val_169 -169 val_169 val_169 -169 val_169 val_169 -169 val_169 val_169 -169 val_169 val_169 -169 val_169 val_169 -169 val_169 val_169 -169 val_169 val_169 -169 val_169 val_169 -169 val_169 val_169 -169 val_169 val_169 -169 val_169 val_169 -169 val_169 val_169 -169 val_169 val_169 -169 val_169 val_169 -169 val_169 val_169 -170 val_170 val_170 -172 val_172 val_172 -172 val_172 val_172 -172 val_172 val_172 -172 val_172 val_172 -174 val_174 val_174 -174 val_174 val_174 -174 val_174 val_174 -174 val_174 val_174 -176 val_176 val_176 -176 val_176 val_176 -176 val_176 val_176 -176 val_176 val_176 -178 val_178 val_178 -181 val_181 val_181 -183 val_183 val_183 -187 val_187 val_187 -187 val_187 val_187 -187 val_187 val_187 -187 val_187 val_187 -187 val_187 val_187 -187 val_187 val_187 -187 val_187 val_187 -187 val_187 val_187 -187 val_187 val_187 -189 val_189 val_189 -190 val_190 val_190 -192 val_192 val_192 -194 val_194 val_194 -196 val_196 val_196 -200 val_200 val_200 -200 val_200 val_200 -200 val_200 val_200 -200 val_200 val_200 -202 val_202 val_202 -208 val_208 val_208 -208 val_208 val_208 -208 val_208 val_208 -208 val_208 val_208 -208 val_208 val_208 -208 val_208 val_208 -208 val_208 val_208 -208 val_208 val_208 -208 val_208 val_208 -213 val_213 val_213 -213 val_213 val_213 -213 val_213 val_213 -213 val_213 val_213 -217 val_217 val_217 -217 val_217 val_217 -217 val_217 val_217 -217 val_217 val_217 -219 val_219 val_219 -219 val_219 val_219 -219 val_219 val_219 -219 val_219 val_219 -222 val_222 val_222 -224 val_224 val_224 -224 val_224 val_224 -224 val_224 
val_224 -224 val_224 val_224 -226 val_226 val_226 -228 val_228 val_228 -233 val_233 val_233 -233 val_233 val_233 -233 val_233 val_233 -233 val_233 val_233 -235 val_235 val_235 -237 val_237 val_237 -237 val_237 val_237 -237 val_237 val_237 -237 val_237 val_237 -239 val_239 val_239 -239 val_239 val_239 -239 val_239 val_239 -239 val_239 val_239 -242 val_242 val_242 -242 val_242 val_242 -242 val_242 val_242 -242 val_242 val_242 -244 val_244 val_244 -248 val_248 val_248 -255 val_255 val_255 -255 val_255 val_255 -255 val_255 val_255 -255 val_255 val_255 -257 val_257 val_257 -260 val_260 val_260 -262 val_262 val_262 -266 val_266 val_266 -273 val_273 val_273 -273 val_273 val_273 -273 val_273 val_273 -273 val_273 val_273 -273 val_273 val_273 -273 val_273 val_273 -273 val_273 val_273 -273 val_273 val_273 -273 val_273 val_273 -275 val_275 val_275 -277 val_277 val_277 -277 val_277 val_277 -277 val_277 val_277 -277 val_277 val_277 -277 val_277 val_277 -277 val_277 val_277 -277 val_277 val_277 -277 val_277 val_277 -277 val_277 val_277 -277 val_277 val_277 -277 val_277 val_277 -277 val_277 val_277 -277 val_277 val_277 -277 val_277 val_277 -277 val_277 val_277 -277 val_277 val_277 -280 val_280 val_280 -280 val_280 val_280 -280 val_280 val_280 -280 val_280 val_280 -282 val_282 val_282 -282 val_282 val_282 -282 val_282 val_282 -282 val_282 val_282 -284 val_284 val_284 -286 val_286 val_286 -288 val_288 val_288 -288 val_288 val_288 -288 val_288 val_288 -288 val_288 val_288 -291 val_291 val_291 -305 val_305 val_305 -307 val_307 val_307 -307 val_307 val_307 -307 val_307 val_307 -307 val_307 val_307 -309 val_309 val_309 -309 val_309 val_309 -309 val_309 val_309 -309 val_309 val_309 -310 val_310 val_310 -316 val_316 val_316 -316 val_316 val_316 -316 val_316 val_316 -316 val_316 val_316 -316 val_316 val_316 -316 val_316 val_316 -316 val_316 val_316 -316 val_316 val_316 -316 val_316 val_316 -318 val_318 val_318 -318 val_318 val_318 -318 val_318 val_318 -318 val_318 val_318 -318 val_318 val_318 -318 val_318 val_318 -318 val_318 val_318 -318 val_318 val_318 -318 val_318 val_318 -321 val_321 val_321 -321 val_321 val_321 -321 val_321 val_321 -321 val_321 val_321 -323 val_323 val_323 -325 val_325 val_325 -325 val_325 val_325 -325 val_325 val_325 -325 val_325 val_325 -327 val_327 val_327 -327 val_327 val_327 -327 val_327 val_327 -327 val_327 val_327 -327 val_327 val_327 -327 val_327 val_327 -327 val_327 val_327 -327 val_327 val_327 -327 val_327 val_327 -332 val_332 val_332 -336 val_336 val_336 -338 val_338 val_338 -341 val_341 val_341 -345 val_345 val_345 -356 val_356 val_356 -365 val_365 val_365 -367 val_367 val_367 -367 val_367 val_367 -367 val_367 val_367 -367 val_367 val_367 -369 val_369 val_369 -369 val_369 val_369 -369 val_369 val_369 -369 val_369 val_369 -369 val_369 val_369 -369 val_369 val_369 -369 val_369 val_369 -369 val_369 val_369 -369 val_369 val_369 -374 val_374 val_374 -378 val_378 val_378 -389 val_389 val_389 -392 val_392 val_392 -394 val_394 val_394 -396 val_396 val_396 -396 val_396 val_396 -396 val_396 val_396 -396 val_396 val_396 -396 val_396 val_396 -396 val_396 val_396 -396 val_396 val_396 -396 val_396 val_396 -396 val_396 val_396 -400 val_400 val_400 -402 val_402 val_402 -404 val_404 val_404 -404 val_404 val_404 -404 val_404 val_404 -404 val_404 val_404 -406 val_406 val_406 -406 val_406 val_406 -406 val_406 val_406 -406 val_406 val_406 -406 val_406 val_406 -406 val_406 val_406 -406 val_406 val_406 -406 val_406 val_406 -406 val_406 val_406 -406 val_406 val_406 -406 val_406 val_406 -406 val_406 
val_406 -406 val_406 val_406 -406 val_406 val_406 -406 val_406 val_406 -406 val_406 val_406 -411 val_411 val_411 -413 val_413 val_413 -413 val_413 val_413 -413 val_413 val_413 -413 val_413 val_413 -417 val_417 val_417 -417 val_417 val_417 -417 val_417 val_417 -417 val_417 val_417 -417 val_417 val_417 -417 val_417 val_417 -417 val_417 val_417 -417 val_417 val_417 -417 val_417 val_417 -419 val_419 val_419 -424 val_424 val_424 -424 val_424 val_424 -424 val_424 val_424 -424 val_424 val_424 -431 val_431 val_431 -431 val_431 val_431 -431 val_431 val_431 -431 val_431 val_431 -431 val_431 val_431 -431 val_431 val_431 -431 val_431 val_431 -431 val_431 val_431 -431 val_431 val_431 -435 val_435 val_435 -437 val_437 val_437 -439 val_439 val_439 -439 val_439 val_439 -439 val_439 val_439 -439 val_439 val_439 -444 val_444 val_444 -446 val_446 val_446 -448 val_448 val_448 -453 val_453 val_453 -455 val_455 val_455 -457 val_457 val_457 -459 val_459 val_459 -459 val_459 val_459 -459 val_459 val_459 -459 val_459 val_459 -460 val_460 val_460 -462 val_462 val_462 -462 val_462 val_462 -462 val_462 val_462 -462 val_462 val_462 -466 val_466 val_466 -466 val_466 val_466 -466 val_466 val_466 -466 val_466 val_466 -466 val_466 val_466 -466 val_466 val_466 -466 val_466 val_466 -466 val_466 val_466 -466 val_466 val_466 -468 val_468 val_468 -468 val_468 val_468 -468 val_468 val_468 -468 val_468 val_468 -468 val_468 val_468 -468 val_468 val_468 -468 val_468 val_468 -468 val_468 val_468 -468 val_468 val_468 -468 val_468 val_468 -468 val_468 val_468 -468 val_468 val_468 -468 val_468 val_468 -468 val_468 val_468 -468 val_468 val_468 -468 val_468 val_468 -475 val_475 val_475 -477 val_477 val_477 -479 val_479 val_479 -480 val_480 val_480 -480 val_480 val_480 -480 val_480 val_480 -480 val_480 val_480 -480 val_480 val_480 -480 val_480 val_480 -480 val_480 val_480 -480 val_480 val_480 -480 val_480 val_480 -482 val_482 val_482 -484 val_484 val_484 -491 val_491 val_491 -493 val_493 val_493 -495 val_495 val_495 -497 val_497 val_497 -PREHOOK: query: CREATE TABLE tab2(key int, value string) PARTITIONED BY(ds STRING) STORED AS TEXTFILE -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@tab2 -POSTHOOK: query: CREATE TABLE tab2(key int, value string) PARTITIONED BY(ds STRING) STORED AS TEXTFILE -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@tab2 -PREHOOK: query: insert overwrite table tab2 partition (ds='2008-04-08') -select key,value from srcbucket_mapjoin -PREHOOK: type: QUERY -PREHOOK: Input: default@srcbucket_mapjoin -PREHOOK: Input: default@srcbucket_mapjoin@ds=2008-04-08 -PREHOOK: Output: default@tab2@ds=2008-04-08 -POSTHOOK: query: insert overwrite table tab2 partition (ds='2008-04-08') -select key,value from srcbucket_mapjoin -POSTHOOK: type: QUERY -POSTHOOK: Input: default@srcbucket_mapjoin -POSTHOOK: Input: default@srcbucket_mapjoin@ds=2008-04-08 -POSTHOOK: Output: default@tab2@ds=2008-04-08 -POSTHOOK: Lineage: tab2 PARTITION(ds=2008-04-08).key SIMPLE [(srcbucket_mapjoin)srcbucket_mapjoin.FieldSchema(name:key, type:int, comment:null), ] -POSTHOOK: Lineage: tab2 PARTITION(ds=2008-04-08).value SIMPLE [(srcbucket_mapjoin)srcbucket_mapjoin.FieldSchema(name:value, type:string, comment:null), ] -PREHOOK: query: analyze table tab2 compute statistics for columns -PREHOOK: type: ANALYZE_TABLE -PREHOOK: Input: default@tab2 -PREHOOK: Input: default@tab2@ds=2008-04-08 -PREHOOK: Output: default@tab2 -PREHOOK: Output: default@tab2@ds=2008-04-08 -#### A masked 
pattern was here #### -POSTHOOK: query: analyze table tab2 compute statistics for columns -POSTHOOK: type: ANALYZE_TABLE -POSTHOOK: Input: default@tab2 -POSTHOOK: Input: default@tab2@ds=2008-04-08 -POSTHOOK: Output: default@tab2 -POSTHOOK: Output: default@tab2@ds=2008-04-08 -#### A masked pattern was here #### -PREHOOK: query: explain select a.key, a.value, b.value - from tab2 a join tab_part b on a.key = b.key and a.value = b.value -PREHOOK: type: QUERY -POSTHOOK: query: explain select a.key, a.value, b.value - from tab2 a join tab_part b on a.key = b.key and a.value = b.value -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-1 - Tez -#### A masked pattern was here #### - Edges: - Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 3 (SIMPLE_EDGE) -#### A masked pattern was here #### - Vertices: - Map 1 - Map Operator Tree: - TableScan - alias: a - Statistics: Num rows: 242 Data size: 22990 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator - predicate: (key is not null and value is not null) (type: boolean) - Statistics: Num rows: 242 Data size: 22990 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: key (type: int), value (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 242 Data size: 22990 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 (type: int), _col1 (type: string) - sort order: ++ - Map-reduce partition columns: _col0 (type: int), _col1 (type: string) - Statistics: Num rows: 242 Data size: 22990 Basic stats: COMPLETE Column stats: COMPLETE - Execution mode: vectorized, llap - LLAP IO: no inputs - Map 3 - Map Operator Tree: - TableScan - alias: b - Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator - predicate: (key is not null and value is not null) (type: boolean) - Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: key (type: int), value (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 (type: int), _col1 (type: string) - sort order: ++ - Map-reduce partition columns: _col0 (type: int), _col1 (type: string) - Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE - Execution mode: vectorized, llap - LLAP IO: no inputs - Reducer 2 - Execution mode: llap - Reduce Operator Tree: - Merge Join Operator - condition map: - Inner Join 0 to 1 - keys: - 0 _col0 (type: int), _col1 (type: string) - 1 _col0 (type: int), _col1 (type: string) - outputColumnNames: _col0, _col1, _col3 - Statistics: Num rows: 394 Data size: 73284 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col3 (type: string) - outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 394 Data size: 73284 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: Num rows: 394 Data size: 73284 Basic stats: COMPLETE Column stats: COMPLETE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - ListSink - -PREHOOK: 
query: select a.key, a.value, b.value -from tab2 a join tab_part b on a.key = b.key and a.value = b.value -order by a.key, a.value, b.value -PREHOOK: type: QUERY -PREHOOK: Input: default@tab2 -PREHOOK: Input: default@tab2@ds=2008-04-08 -PREHOOK: Input: default@tab_part -PREHOOK: Input: default@tab_part@ds=2008-04-08 -#### A masked pattern was here #### -POSTHOOK: query: select a.key, a.value, b.value -from tab2 a join tab_part b on a.key = b.key and a.value = b.value -order by a.key, a.value, b.value -POSTHOOK: type: QUERY -POSTHOOK: Input: default@tab2 -POSTHOOK: Input: default@tab2@ds=2008-04-08 -POSTHOOK: Input: default@tab_part -POSTHOOK: Input: default@tab_part@ds=2008-04-08 -#### A masked pattern was here #### -0 val_0 val_0 -0 val_0 val_0 -0 val_0 val_0 -0 val_0 val_0 -0 val_0 val_0 -0 val_0 val_0 -0 val_0 val_0 -0 val_0 val_0 -0 val_0 val_0 -2 val_2 val_2 -4 val_4 val_4 -8 val_8 val_8 -11 val_11 val_11 -15 val_15 val_15 -15 val_15 val_15 -15 val_15 val_15 -15 val_15 val_15 -17 val_17 val_17 -19 val_19 val_19 -20 val_20 val_20 -24 val_24 val_24 -24 val_24 val_24 -24 val_24 val_24 -24 val_24 val_24 -26 val_26 val_26 -26 val_26 val_26 -26 val_26 val_26 -26 val_26 val_26 -28 val_28 val_28 -33 val_33 val_33 -35 val_35 val_35 -35 val_35 val_35 -35 val_35 val_35 -35 val_35 val_35 -35 val_35 val_35 -35 val_35 val_35 -35 val_35 val_35 -35 val_35 val_35 -35 val_35 val_35 -37 val_37 val_37 -37 val_37 val_37 -37 val_37 val_37 -37 val_37 val_37 -42 val_42 val_42 -42 val_42 val_42 -42 val_42 val_42 -42 val_42 val_42 -44 val_44 val_44 -51 val_51 val_51 -51 val_51 val_51 -51 val_51 val_51 -51 val_51 val_51 -53 val_53 val_53 -57 val_57 val_57 -64 val_64 val_64 -66 val_66 val_66 -77 val_77 val_77 -80 val_80 val_80 -82 val_82 val_82 -84 val_84 val_84 -84 val_84 val_84 -84 val_84 val_84 -84 val_84 val_84 -86 val_86 val_86 -95 val_95 val_95 -95 val_95 val_95 -95 val_95 val_95 -95 val_95 val_95 -97 val_97 val_97 -97 val_97 val_97 -97 val_97 val_97 -97 val_97 val_97 -103 val_103 val_103 -103 val_103 val_103 -103 val_103 val_103 -103 val_103 val_103 -105 val_105 val_105 -114 val_114 val_114 -116 val_116 val_116 -118 val_118 val_118 -118 val_118 val_118 -118 val_118 val_118 -118 val_118 val_118 -125 val_125 val_125 -125 val_125 val_125 -125 val_125 val_125 -125 val_125 val_125 -129 val_129 val_129 -129 val_129 val_129 -129 val_129 val_129 -129 val_129 val_129 -134 val_134 val_134 -134 val_134 val_134 -134 val_134 val_134 -134 val_134 val_134 -136 val_136 val_136 -138 val_138 val_138 -138 val_138 val_138 -138 val_138 val_138 -138 val_138 val_138 -138 val_138 val_138 -138 val_138 val_138 -138 val_138 val_138 -138 val_138 val_138 -138 val_138 val_138 -138 val_138 val_138 -138 val_138 val_138 -138 val_138 val_138 -138 val_138 val_138 -138 val_138 val_138 -138 val_138 val_138 -138 val_138 val_138 -143 val_143 val_143 -145 val_145 val_145 -149 val_149 val_149 -149 val_149 val_149 -149 val_149 val_149 -149 val_149 val_149 -150 val_150 val_150 -152 val_152 val_152 -152 val_152 val_152 -152 val_152 val_152 -152 val_152 val_152 -156 val_156 val_156 -158 val_158 val_158 -163 val_163 val_163 -165 val_165 val_165 -165 val_165 val_165 -165 val_165 val_165 -165 val_165 val_165 -167 val_167 val_167 -167 val_167 val_167 -167 val_167 val_167 -167 val_167 val_167 -167 val_167 val_167 -167 val_167 val_167 -167 val_167 val_167 -167 val_167 val_167 -167 val_167 val_167 -169 val_169 val_169 -169 val_169 val_169 -169 val_169 val_169 -169 val_169 val_169 -169 val_169 val_169 -169 val_169 val_169 -169 val_169 val_169 -169 val_169 
val_169 -169 val_169 val_169 -169 val_169 val_169 -169 val_169 val_169 -169 val_169 val_169 -169 val_169 val_169 -169 val_169 val_169 -169 val_169 val_169 -169 val_169 val_169 -170 val_170 val_170 -172 val_172 val_172 -172 val_172 val_172 -172 val_172 val_172 -172 val_172 val_172 -174 val_174 val_174 -174 val_174 val_174 -174 val_174 val_174 -174 val_174 val_174 -176 val_176 val_176 -176 val_176 val_176 -176 val_176 val_176 -176 val_176 val_176 -178 val_178 val_178 -181 val_181 val_181 -183 val_183 val_183 -187 val_187 val_187 -187 val_187 val_187 -187 val_187 val_187 -187 val_187 val_187 -187 val_187 val_187 -187 val_187 val_187 -187 val_187 val_187 -187 val_187 val_187 -187 val_187 val_187 -189 val_189 val_189 -190 val_190 val_190 -192 val_192 val_192 -194 val_194 val_194 -196 val_196 val_196 -200 val_200 val_200 -200 val_200 val_200 -200 val_200 val_200 -200 val_200 val_200 -202 val_202 val_202 -208 val_208 val_208 -208 val_208 val_208 -208 val_208 val_208 -208 val_208 val_208 -208 val_208 val_208 -208 val_208 val_208 -208 val_208 val_208 -208 val_208 val_208 -208 val_208 val_208 -213 val_213 val_213 -213 val_213 val_213 -213 val_213 val_213 -213 val_213 val_213 -217 val_217 val_217 -217 val_217 val_217 -217 val_217 val_217 -217 val_217 val_217 -219 val_219 val_219 -219 val_219 val_219 -219 val_219 val_219 -219 val_219 val_219 -222 val_222 val_222 -224 val_224 val_224 -224 val_224 val_224 -224 val_224 val_224 -224 val_224 val_224 -226 val_226 val_226 -228 val_228 val_228 -233 val_233 val_233 -233 val_233 val_233 -233 val_233 val_233 -233 val_233 val_233 -235 val_235 val_235 -237 val_237 val_237 -237 val_237 val_237 -237 val_237 val_237 -237 val_237 val_237 -239 val_239 val_239 -239 val_239 val_239 -239 val_239 val_239 -239 val_239 val_239 -242 val_242 val_242 -242 val_242 val_242 -242 val_242 val_242 -242 val_242 val_242 -244 val_244 val_244 -248 val_248 val_248 -255 val_255 val_255 -255 val_255 val_255 -255 val_255 val_255 -255 val_255 val_255 -257 val_257 val_257 -260 val_260 val_260 -262 val_262 val_262 -266 val_266 val_266 -273 val_273 val_273 -273 val_273 val_273 -273 val_273 val_273 -273 val_273 val_273 -273 val_273 val_273 -273 val_273 val_273 -273 val_273 val_273 -273 val_273 val_273 -273 val_273 val_273 -275 val_275 val_275 -277 val_277 val_277 -277 val_277 val_277 -277 val_277 val_277 -277 val_277 val_277 -277 val_277 val_277 -277 val_277 val_277 -277 val_277 val_277 -277 val_277 val_277 -277 val_277 val_277 -277 val_277 val_277 -277 val_277 val_277 -277 val_277 val_277 -277 val_277 val_277 -277 val_277 val_277 -277 val_277 val_277 -277 val_277 val_277 -280 val_280 val_280 -280 val_280 val_280 -280 val_280 val_280 -280 val_280 val_280 -282 val_282 val_282 -282 val_282 val_282 -282 val_282 val_282 -282 val_282 val_282 -284 val_284 val_284 -286 val_286 val_286 -288 val_288 val_288 -288 val_288 val_288 -288 val_288 val_288 -288 val_288 val_288 -291 val_291 val_291 -305 val_305 val_305 -307 val_307 val_307 -307 val_307 val_307 -307 val_307 val_307 -307 val_307 val_307 -309 val_309 val_309 -309 val_309 val_309 -309 val_309 val_309 -309 val_309 val_309 -310 val_310 val_310 -316 val_316 val_316 -316 val_316 val_316 -316 val_316 val_316 -316 val_316 val_316 -316 val_316 val_316 -316 val_316 val_316 -316 val_316 val_316 -316 val_316 val_316 -316 val_316 val_316 -318 val_318 val_318 -318 val_318 val_318 -318 val_318 val_318 -318 val_318 val_318 -318 val_318 val_318 -318 val_318 val_318 -318 val_318 val_318 -318 val_318 val_318 -318 val_318 val_318 -321 val_321 val_321 -321 val_321 
val_321 -321 val_321 val_321 -321 val_321 val_321 -323 val_323 val_323 -325 val_325 val_325 -325 val_325 val_325 -325 val_325 val_325 -325 val_325 val_325 -327 val_327 val_327 -327 val_327 val_327 -327 val_327 val_327 -327 val_327 val_327 -327 val_327 val_327 -327 val_327 val_327 -327 val_327 val_327 -327 val_327 val_327 -327 val_327 val_327 -332 val_332 val_332 -336 val_336 val_336 -338 val_338 val_338 -341 val_341 val_341 -345 val_345 val_345 -356 val_356 val_356 -365 val_365 val_365 -367 val_367 val_367 -367 val_367 val_367 -367 val_367 val_367 -367 val_367 val_367 -369 val_369 val_369 -369 val_369 val_369 -369 val_369 val_369 -369 val_369 val_369 -369 val_369 val_369 -369 val_369 val_369 -369 val_369 val_369 -369 val_369 val_369 -369 val_369 val_369 -374 val_374 val_374 -378 val_378 val_378 -389 val_389 val_389 -392 val_392 val_392 -394 val_394 val_394 -396 val_396 val_396 -396 val_396 val_396 -396 val_396 val_396 -396 val_396 val_396 -396 val_396 val_396 -396 val_396 val_396 -396 val_396 val_396 -396 val_396 val_396 -396 val_396 val_396 -400 val_400 val_400 -402 val_402 val_402 -404 val_404 val_404 -404 val_404 val_404 -404 val_404 val_404 -404 val_404 val_404 -406 val_406 val_406 -406 val_406 val_406 -406 val_406 val_406 -406 val_406 val_406 -406 val_406 val_406 -406 val_406 val_406 -406 val_406 val_406 -406 val_406 val_406 -406 val_406 val_406 -406 val_406 val_406 -406 val_406 val_406 -406 val_406 val_406 -406 val_406 val_406 -406 val_406 val_406 -406 val_406 val_406 -406 val_406 val_406 -411 val_411 val_411 -413 val_413 val_413 -413 val_413 val_413 -413 val_413 val_413 -413 val_413 val_413 -417 val_417 val_417 -417 val_417 val_417 -417 val_417 val_417 -417 val_417 val_417 -417 val_417 val_417 -417 val_417 val_417 -417 val_417 val_417 -417 val_417 val_417 -417 val_417 val_417 -419 val_419 val_419 -424 val_424 val_424 -424 val_424 val_424 -424 val_424 val_424 -424 val_424 val_424 -431 val_431 val_431 -431 val_431 val_431 -431 val_431 val_431 -431 val_431 val_431 -431 val_431 val_431 -431 val_431 val_431 -431 val_431 val_431 -431 val_431 val_431 -431 val_431 val_431 -435 val_435 val_435 -437 val_437 val_437 -439 val_439 val_439 -439 val_439 val_439 -439 val_439 val_439 -439 val_439 val_439 -444 val_444 val_444 -446 val_446 val_446 -448 val_448 val_448 -453 val_453 val_453 -455 val_455 val_455 -457 val_457 val_457 -459 val_459 val_459 -459 val_459 val_459 -459 val_459 val_459 -459 val_459 val_459 -460 val_460 val_460 -462 val_462 val_462 -462 val_462 val_462 -462 val_462 val_462 -462 val_462 val_462 -466 val_466 val_466 -466 val_466 val_466 -466 val_466 val_466 -466 val_466 val_466 -466 val_466 val_466 -466 val_466 val_466 -466 val_466 val_466 -466 val_466 val_466 -466 val_466 val_466 -468 val_468 val_468 -468 val_468 val_468 -468 val_468 val_468 -468 val_468 val_468 -468 val_468 val_468 -468 val_468 val_468 -468 val_468 val_468 -468 val_468 val_468 -468 val_468 val_468 -468 val_468 val_468 -468 val_468 val_468 -468 val_468 val_468 -468 val_468 val_468 -468 val_468 val_468 -468 val_468 val_468 -468 val_468 val_468 -475 val_475 val_475 -477 val_477 val_477 -479 val_479 val_479 -480 val_480 val_480 -480 val_480 val_480 -480 val_480 val_480 -480 val_480 val_480 -480 val_480 val_480 -480 val_480 val_480 -480 val_480 val_480 -480 val_480 val_480 -480 val_480 val_480 -482 val_482 val_482 -484 val_484 val_484 -491 val_491 val_491 -493 val_493 val_493 -495 val_495 val_495 -497 val_497 val_497 -PREHOOK: query: explain select a.key, a.value, b.value - from tab2 a join tab_part b on a.key = b.key 
and a.value = b.value -PREHOOK: type: QUERY -POSTHOOK: query: explain select a.key, a.value, b.value - from tab2 a join tab_part b on a.key = b.key and a.value = b.value -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-1 - Tez -#### A masked pattern was here #### - Edges: - Map 2 <- Map 1 (CUSTOM_EDGE) -#### A masked pattern was here #### - Vertices: - Map 1 - Map Operator Tree: - TableScan - alias: a - Statistics: Num rows: 242 Data size: 22990 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator - predicate: (key is not null and value is not null) (type: boolean) - Statistics: Num rows: 242 Data size: 22990 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: key (type: int), value (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 242 Data size: 22990 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 (type: int), _col1 (type: string) - sort order: ++ - Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 242 Data size: 22990 Basic stats: COMPLETE Column stats: COMPLETE - Execution mode: vectorized, llap - LLAP IO: no inputs - Map 2 - Map Operator Tree: - TableScan - alias: b - Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator - predicate: (key is not null and value is not null) (type: boolean) - Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: key (type: int), value (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE - Map Join Operator - condition map: - Inner Join 0 to 1 - keys: - 0 _col0 (type: int), _col1 (type: string) - 1 _col0 (type: int), _col1 (type: string) - outputColumnNames: _col0, _col1, _col3 - input vertices: - 0 Map 1 - Statistics: Num rows: 394 Data size: 73284 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col3 (type: string) - outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 394 Data size: 73284 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: Num rows: 394 Data size: 73284 Basic stats: COMPLETE Column stats: COMPLETE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - Execution mode: vectorized, llap - LLAP IO: no inputs - - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - ListSink - -PREHOOK: query: select a.key, a.value, b.value -from tab2 a join tab_part b on a.key = b.key and a.value = b.value -order by a.key, a.value, b.value -PREHOOK: type: QUERY -PREHOOK: Input: default@tab2 -PREHOOK: Input: default@tab2@ds=2008-04-08 -PREHOOK: Input: default@tab_part -PREHOOK: Input: default@tab_part@ds=2008-04-08 -#### A masked pattern was here #### -POSTHOOK: query: select a.key, a.value, b.value -from tab2 a join tab_part b on a.key = b.key and a.value = b.value -order by a.key, a.value, b.value -POSTHOOK: type: QUERY -POSTHOOK: Input: default@tab2 -POSTHOOK: Input: default@tab2@ds=2008-04-08 -POSTHOOK: Input: default@tab_part -POSTHOOK: Input: default@tab_part@ds=2008-04-08 -#### A masked pattern was here #### -0 val_0 val_0 -0 val_0 val_0 -0 
val_0 val_0 -0 val_0 val_0 -0 val_0 val_0 -0 val_0 val_0 -0 val_0 val_0 -0 val_0 val_0 -0 val_0 val_0 -2 val_2 val_2 -4 val_4 val_4 -8 val_8 val_8 -11 val_11 val_11 -15 val_15 val_15 -15 val_15 val_15 -15 val_15 val_15 -15 val_15 val_15 -17 val_17 val_17 -19 val_19 val_19 -20 val_20 val_20 -24 val_24 val_24 -24 val_24 val_24 -24 val_24 val_24 -24 val_24 val_24 -26 val_26 val_26 -26 val_26 val_26 -26 val_26 val_26 -26 val_26 val_26 -28 val_28 val_28 -33 val_33 val_33 -35 val_35 val_35 -35 val_35 val_35 -35 val_35 val_35 -35 val_35 val_35 -35 val_35 val_35 -35 val_35 val_35 -35 val_35 val_35 -35 val_35 val_35 -35 val_35 val_35 -37 val_37 val_37 -37 val_37 val_37 -37 val_37 val_37 -37 val_37 val_37 -42 val_42 val_42 -42 val_42 val_42 -42 val_42 val_42 -42 val_42 val_42 -44 val_44 val_44 -51 val_51 val_51 -51 val_51 val_51 -51 val_51 val_51 -51 val_51 val_51 -53 val_53 val_53 -57 val_57 val_57 -64 val_64 val_64 -66 val_66 val_66 -77 val_77 val_77 -80 val_80 val_80 -82 val_82 val_82 -84 val_84 val_84 -84 val_84 val_84 -84 val_84 val_84 -84 val_84 val_84 -86 val_86 val_86 -95 val_95 val_95 -95 val_95 val_95 -95 val_95 val_95 -95 val_95 val_95 -97 val_97 val_97 -97 val_97 val_97 -97 val_97 val_97 -97 val_97 val_97 -103 val_103 val_103 -103 val_103 val_103 -103 val_103 val_103 -103 val_103 val_103 -105 val_105 val_105 -114 val_114 val_114 -116 val_116 val_116 -118 val_118 val_118 -118 val_118 val_118 -118 val_118 val_118 -118 val_118 val_118 -125 val_125 val_125 -125 val_125 val_125 -125 val_125 val_125 -125 val_125 val_125 -129 val_129 val_129 -129 val_129 val_129 -129 val_129 val_129 -129 val_129 val_129 -134 val_134 val_134 -134 val_134 val_134 -134 val_134 val_134 -134 val_134 val_134 -136 val_136 val_136 -138 val_138 val_138 -138 val_138 val_138 -138 val_138 val_138 -138 val_138 val_138 -138 val_138 val_138 -138 val_138 val_138 -138 val_138 val_138 -138 val_138 val_138 -138 val_138 val_138 -138 val_138 val_138 -138 val_138 val_138 -138 val_138 val_138 -138 val_138 val_138 -138 val_138 val_138 -138 val_138 val_138 -138 val_138 val_138 -143 val_143 val_143 -145 val_145 val_145 -149 val_149 val_149 -149 val_149 val_149 -149 val_149 val_149 -149 val_149 val_149 -150 val_150 val_150 -152 val_152 val_152 -152 val_152 val_152 -152 val_152 val_152 -152 val_152 val_152 -156 val_156 val_156 -158 val_158 val_158 -163 val_163 val_163 -165 val_165 val_165 -165 val_165 val_165 -165 val_165 val_165 -165 val_165 val_165 -167 val_167 val_167 -167 val_167 val_167 -167 val_167 val_167 -167 val_167 val_167 -167 val_167 val_167 -167 val_167 val_167 -167 val_167 val_167 -167 val_167 val_167 -167 val_167 val_167 -169 val_169 val_169 -169 val_169 val_169 -169 val_169 val_169 -169 val_169 val_169 -169 val_169 val_169 -169 val_169 val_169 -169 val_169 val_169 -169 val_169 val_169 -169 val_169 val_169 -169 val_169 val_169 -169 val_169 val_169 -169 val_169 val_169 -169 val_169 val_169 -169 val_169 val_169 -169 val_169 val_169 -169 val_169 val_169 -170 val_170 val_170 -172 val_172 val_172 -172 val_172 val_172 -172 val_172 val_172 -172 val_172 val_172 -174 val_174 val_174 -174 val_174 val_174 -174 val_174 val_174 -174 val_174 val_174 -176 val_176 val_176 -176 val_176 val_176 -176 val_176 val_176 -176 val_176 val_176 -178 val_178 val_178 -181 val_181 val_181 -183 val_183 val_183 -187 val_187 val_187 -187 val_187 val_187 -187 val_187 val_187 -187 val_187 val_187 -187 val_187 val_187 -187 val_187 val_187 -187 val_187 val_187 -187 val_187 val_187 -187 val_187 val_187 -189 val_189 val_189 -190 val_190 val_190 -192 val_192 
val_192 -194 val_194 val_194 -196 val_196 val_196 -200 val_200 val_200 -200 val_200 val_200 -200 val_200 val_200 -200 val_200 val_200 -202 val_202 val_202 -208 val_208 val_208 -208 val_208 val_208 -208 val_208 val_208 -208 val_208 val_208 -208 val_208 val_208 -208 val_208 val_208 -208 val_208 val_208 -208 val_208 val_208 -208 val_208 val_208 -213 val_213 val_213 -213 val_213 val_213 -213 val_213 val_213 -213 val_213 val_213 -217 val_217 val_217 -217 val_217 val_217 -217 val_217 val_217 -217 val_217 val_217 -219 val_219 val_219 -219 val_219 val_219 -219 val_219 val_219 -219 val_219 val_219 -222 val_222 val_222 -224 val_224 val_224 -224 val_224 val_224 -224 val_224 val_224 -224 val_224 val_224 -226 val_226 val_226 -228 val_228 val_228 -233 val_233 val_233 -233 val_233 val_233 -233 val_233 val_233 -233 val_233 val_233 -235 val_235 val_235 -237 val_237 val_237 -237 val_237 val_237 -237 val_237 val_237 -237 val_237 val_237 -239 val_239 val_239 -239 val_239 val_239 -239 val_239 val_239 -239 val_239 val_239 -242 val_242 val_242 -242 val_242 val_242 -242 val_242 val_242 -242 val_242 val_242 -244 val_244 val_244 -248 val_248 val_248 -255 val_255 val_255 -255 val_255 val_255 -255 val_255 val_255 -255 val_255 val_255 -257 val_257 val_257 -260 val_260 val_260 -262 val_262 val_262 -266 val_266 val_266 -273 val_273 val_273 -273 val_273 val_273 -273 val_273 val_273 -273 val_273 val_273 -273 val_273 val_273 -273 val_273 val_273 -273 val_273 val_273 -273 val_273 val_273 -273 val_273 val_273 -275 val_275 val_275 -277 val_277 val_277 -277 val_277 val_277 -277 val_277 val_277 -277 val_277 val_277 -277 val_277 val_277 -277 val_277 val_277 -277 val_277 val_277 -277 val_277 val_277 -277 val_277 val_277 -277 val_277 val_277 -277 val_277 val_277 -277 val_277 val_277 -277 val_277 val_277 -277 val_277 val_277 -277 val_277 val_277 -277 val_277 val_277 -280 val_280 val_280 -280 val_280 val_280 -280 val_280 val_280 -280 val_280 val_280 -282 val_282 val_282 -282 val_282 val_282 -282 val_282 val_282 -282 val_282 val_282 -284 val_284 val_284 -286 val_286 val_286 -288 val_288 val_288 -288 val_288 val_288 -288 val_288 val_288 -288 val_288 val_288 -291 val_291 val_291 -305 val_305 val_305 -307 val_307 val_307 -307 val_307 val_307 -307 val_307 val_307 -307 val_307 val_307 -309 val_309 val_309 -309 val_309 val_309 -309 val_309 val_309 -309 val_309 val_309 -310 val_310 val_310 -316 val_316 val_316 -316 val_316 val_316 -316 val_316 val_316 -316 val_316 val_316 -316 val_316 val_316 -316 val_316 val_316 -316 val_316 val_316 -316 val_316 val_316 -316 val_316 val_316 -318 val_318 val_318 -318 val_318 val_318 -318 val_318 val_318 -318 val_318 val_318 -318 val_318 val_318 -318 val_318 val_318 -318 val_318 val_318 -318 val_318 val_318 -318 val_318 val_318 -321 val_321 val_321 -321 val_321 val_321 -321 val_321 val_321 -321 val_321 val_321 -323 val_323 val_323 -325 val_325 val_325 -325 val_325 val_325 -325 val_325 val_325 -325 val_325 val_325 -327 val_327 val_327 -327 val_327 val_327 -327 val_327 val_327 -327 val_327 val_327 -327 val_327 val_327 -327 val_327 val_327 -327 val_327 val_327 -327 val_327 val_327 -327 val_327 val_327 -332 val_332 val_332 -336 val_336 val_336 -338 val_338 val_338 -341 val_341 val_341 -345 val_345 val_345 -356 val_356 val_356 -365 val_365 val_365 -367 val_367 val_367 -367 val_367 val_367 -367 val_367 val_367 -367 val_367 val_367 -369 val_369 val_369 -369 val_369 val_369 -369 val_369 val_369 -369 val_369 val_369 -369 val_369 val_369 -369 val_369 val_369 -369 val_369 val_369 -369 val_369 val_369 -369 val_369 
val_369 -374 val_374 val_374 -378 val_378 val_378 -389 val_389 val_389 -392 val_392 val_392 -394 val_394 val_394 -396 val_396 val_396 -396 val_396 val_396 -396 val_396 val_396 -396 val_396 val_396 -396 val_396 val_396 -396 val_396 val_396 -396 val_396 val_396 -396 val_396 val_396 -396 val_396 val_396 -400 val_400 val_400 -402 val_402 val_402 -404 val_404 val_404 -404 val_404 val_404 -404 val_404 val_404 -404 val_404 val_404 -406 val_406 val_406 -406 val_406 val_406 -406 val_406 val_406 -406 val_406 val_406 -406 val_406 val_406 -406 val_406 val_406 -406 val_406 val_406 -406 val_406 val_406 -406 val_406 val_406 -406 val_406 val_406 -406 val_406 val_406 -406 val_406 val_406 -406 val_406 val_406 -406 val_406 val_406 -406 val_406 val_406 -406 val_406 val_406 -411 val_411 val_411 -413 val_413 val_413 -413 val_413 val_413 -413 val_413 val_413 -413 val_413 val_413 -417 val_417 val_417 -417 val_417 val_417 -417 val_417 val_417 -417 val_417 val_417 -417 val_417 val_417 -417 val_417 val_417 -417 val_417 val_417 -417 val_417 val_417 -417 val_417 val_417 -419 val_419 val_419 -424 val_424 val_424 -424 val_424 val_424 -424 val_424 val_424 -424 val_424 val_424 -431 val_431 val_431 -431 val_431 val_431 -431 val_431 val_431 -431 val_431 val_431 -431 val_431 val_431 -431 val_431 val_431 -431 val_431 val_431 -431 val_431 val_431 -431 val_431 val_431 -435 val_435 val_435 -437 val_437 val_437 -439 val_439 val_439 -439 val_439 val_439 -439 val_439 val_439 -439 val_439 val_439 -444 val_444 val_444 -446 val_446 val_446 -448 val_448 val_448 -453 val_453 val_453 -455 val_455 val_455 -457 val_457 val_457 -459 val_459 val_459 -459 val_459 val_459 -459 val_459 val_459 -459 val_459 val_459 -460 val_460 val_460 -462 val_462 val_462 -462 val_462 val_462 -462 val_462 val_462 -462 val_462 val_462 -466 val_466 val_466 -466 val_466 val_466 -466 val_466 val_466 -466 val_466 val_466 -466 val_466 val_466 -466 val_466 val_466 -466 val_466 val_466 -466 val_466 val_466 -466 val_466 val_466 -468 val_468 val_468 -468 val_468 val_468 -468 val_468 val_468 -468 val_468 val_468 -468 val_468 val_468 -468 val_468 val_468 -468 val_468 val_468 -468 val_468 val_468 -468 val_468 val_468 -468 val_468 val_468 -468 val_468 val_468 -468 val_468 val_468 -468 val_468 val_468 -468 val_468 val_468 -468 val_468 val_468 -468 val_468 val_468 -475 val_475 val_475 -477 val_477 val_477 -479 val_479 val_479 -480 val_480 val_480 -480 val_480 val_480 -480 val_480 val_480 -480 val_480 val_480 -480 val_480 val_480 -480 val_480 val_480 -480 val_480 val_480 -480 val_480 val_480 -480 val_480 val_480 -482 val_482 val_482 -484 val_484 val_484 -491 val_491 val_491 -493 val_493 val_493 -495 val_495 val_495 -497 val_497 val_497 diff --git ql/src/test/results/clientpositive/llap/clusterctas.q.out ql/src/test/results/clientpositive/llap/clusterctas.q.out deleted file mode 100644 index 40ceee215f..0000000000 --- ql/src/test/results/clientpositive/llap/clusterctas.q.out +++ /dev/null @@ -1,145 +0,0 @@ -PREHOOK: query: EXPLAIN -CREATE TABLE x STORED AS ORC TBLPROPERTIES('transactional'='true') AS -SELECT * FROM SRC x CLUSTER BY x.key -PREHOOK: type: CREATETABLE_AS_SELECT -PREHOOK: Input: default@src -PREHOOK: Output: database:default -PREHOOK: Output: default@x -POSTHOOK: query: EXPLAIN -CREATE TABLE x STORED AS ORC TBLPROPERTIES('transactional'='true') AS -SELECT * FROM SRC x CLUSTER BY x.key -POSTHOOK: type: CREATETABLE_AS_SELECT -POSTHOOK: Input: default@src -POSTHOOK: Output: database:default -POSTHOOK: Output: default@x -STAGE DEPENDENCIES: - Stage-1 is a root stage - 
Stage-2 depends on stages: Stage-1 - Stage-4 depends on stages: Stage-0, Stage-2 - Stage-3 depends on stages: Stage-4 - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-1 - Tez -#### A masked pattern was here #### - Edges: - Reducer 2 <- Map 1 (SIMPLE_EDGE) - Reducer 3 <- Reducer 2 (CUSTOM_SIMPLE_EDGE) -#### A masked pattern was here #### - Vertices: - Map 1 - Map Operator Tree: - TableScan - alias: x - Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: key (type: string), value (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 (type: string) - null sort order: a - sort order: + - Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col1 (type: string) - Execution mode: vectorized, llap - LLAP IO: no inputs - Reducer 2 - Execution mode: llap - Reduce Operator Tree: - Select Operator - expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE - table: - input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat - output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat - serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.x - Write Type: INSERT - Select Operator - expressions: _col0 (type: string), _col1 (type: string) - outputColumnNames: col1, col2 - Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE - Group By Operator - aggregations: compute_stats(col1, 'hll'), compute_stats(col2, 'hll') - minReductionHashAggr: 0.99 - mode: hash - outputColumnNames: _col0, _col1 - Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - null sort order: - sort order: - Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col0 (type: struct), _col1 (type: struct) - Reducer 3 - Execution mode: llap - Reduce Operator Tree: - Group By Operator - aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1) - mode: mergepartial - outputColumnNames: _col0, _col1 - Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - Stage: Stage-2 - Dependency Collection - - Stage: Stage-4 - Create Table - columns: key string, value string - name: default.x - input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat - output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat - serde name: org.apache.hadoop.hive.ql.io.orc.OrcSerde - table properties: - transactional true - - Stage: Stage-3 - Stats Work - Basic Stats Work: - Column Stats Desc: - Columns: key, value - Column Types: string, string - Table: default.x - - Stage: Stage-0 - Move Operator - files: - hdfs directory: true 
-#### A masked pattern was here #### - Write Type: INSERT - -PREHOOK: query: CREATE TABLE x STORED AS ORC TBLPROPERTIES('transactional'='true') AS -SELECT * FROM SRC x CLUSTER BY x.key -PREHOOK: type: CREATETABLE_AS_SELECT -PREHOOK: Input: default@src -PREHOOK: Output: database:default -PREHOOK: Output: default@x -POSTHOOK: query: CREATE TABLE x STORED AS ORC TBLPROPERTIES('transactional'='true') AS -SELECT * FROM SRC x CLUSTER BY x.key -POSTHOOK: type: CREATETABLE_AS_SELECT -POSTHOOK: Input: default@src -POSTHOOK: Output: database:default -POSTHOOK: Output: default@x -POSTHOOK: Lineage: x.key SIMPLE [(src)x.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: x.value SIMPLE [(src)x.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: DROP TABLE x -PREHOOK: type: DROPTABLE -PREHOOK: Input: default@x -PREHOOK: Output: default@x -POSTHOOK: query: DROP TABLE x -POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@x -POSTHOOK: Output: default@x diff --git ql/src/test/results/clientpositive/llap/hashjoin.q.out ql/src/test/results/clientpositive/llap/hashjoin.q.out deleted file mode 100644 index b95e69a8b3..0000000000 --- ql/src/test/results/clientpositive/llap/hashjoin.q.out +++ /dev/null @@ -1,586 +0,0 @@ -PREHOOK: query: CREATE TABLE t_hashjoin_big( - cint int, - cvarchar varchar(50), - cdouble double, - a int -) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@t_hashjoin_big -POSTHOOK: query: CREATE TABLE t_hashjoin_big( - cint int, - cvarchar varchar(50), - cdouble double, - a int -) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@t_hashjoin_big -PREHOOK: query: CREATE TABLE t_hashjoin_small( - cint int, - cvarchar varchar(50), - cdouble double -) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@t_hashjoin_small -POSTHOOK: query: CREATE TABLE t_hashjoin_small( - cint int, - cvarchar varchar(50), - cdouble double -) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@t_hashjoin_small -PREHOOK: query: INSERT INTO t_hashjoin_big VALUES -(5, 'two', 3.0, 1), -(6, 'two', 1.5, 2), -(NULL, NULL, NULL, NULL), -(7, 'eight', 4.2, 3), (7, 'eight', 4.2, 4), (7, 'eight', 4.2, 5), -(5, 'one', 2.8, 6), (5, 'one', 2.8, 7), (5, 'one', 2.8, 8) -PREHOOK: type: QUERY -PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@t_hashjoin_big -POSTHOOK: query: INSERT INTO t_hashjoin_big VALUES -(5, 'two', 3.0, 1), -(6, 'two', 1.5, 2), -(NULL, NULL, NULL, NULL), -(7, 'eight', 4.2, 3), (7, 'eight', 4.2, 4), (7, 'eight', 4.2, 5), -(5, 'one', 2.8, 6), (5, 'one', 2.8, 7), (5, 'one', 2.8, 8) -POSTHOOK: type: QUERY -POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@t_hashjoin_big -POSTHOOK: Lineage: t_hashjoin_big.a SCRIPT [] -POSTHOOK: Lineage: t_hashjoin_big.cdouble SCRIPT [] -POSTHOOK: Lineage: t_hashjoin_big.cint SCRIPT [] -POSTHOOK: Lineage: t_hashjoin_big.cvarchar SCRIPT [] -PREHOOK: query: INSERT INTO t_hashjoin_small VALUES -(7, 'two', 1.5), -(5, 'two', 4.2), -(NULL, NULL, NULL), -(5, 'one', 1.1), (5, 'one', 1.1) -PREHOOK: type: QUERY -PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@t_hashjoin_small -POSTHOOK: query: INSERT INTO t_hashjoin_small VALUES -(7, 'two', 1.5), -(5, 'two', 4.2), -(NULL, NULL, NULL), -(5, 'one', 1.1), (5, 'one', 1.1) -POSTHOOK: type: QUERY -POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@t_hashjoin_small 
-POSTHOOK: Lineage: t_hashjoin_small.cdouble SCRIPT [] -POSTHOOK: Lineage: t_hashjoin_small.cint SCRIPT [] -POSTHOOK: Lineage: t_hashjoin_small.cvarchar SCRIPT [] -PREHOOK: query: EXPLAIN -SELECT * FROM t_hashjoin_big x JOIN t_hashjoin_small z ON (x.cint = z.cint) -PREHOOK: type: QUERY -PREHOOK: Input: default@t_hashjoin_big -PREHOOK: Input: default@t_hashjoin_small -#### A masked pattern was here #### -POSTHOOK: query: EXPLAIN -SELECT * FROM t_hashjoin_big x JOIN t_hashjoin_small z ON (x.cint = z.cint) -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t_hashjoin_big -POSTHOOK: Input: default@t_hashjoin_small -#### A masked pattern was here #### -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-1 - Tez -#### A masked pattern was here #### - Edges: - Map 1 <- Map 2 (BROADCAST_EDGE) -#### A masked pattern was here #### - Vertices: - Map 1 - Map Operator Tree: - TableScan - alias: x - filterExpr: cint is not null (type: boolean) - Statistics: Num rows: 9 Data size: 927 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator - predicate: cint is not null (type: boolean) - Statistics: Num rows: 8 Data size: 824 Basic stats: COMPLETE Column stats: COMPLETE - Map Join Operator - condition map: - Inner Join 0 to 1 - keys: - 0 cint (type: int) - 1 cint (type: int) - outputColumnNames: _col0, _col1, _col2, _col3, _col7, _col8, _col9 - input vertices: - 1 Map 2 - Statistics: Num rows: 10 Data size: 2010 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: _col0 (type: int), _col1 (type: varchar(50)), _col2 (type: double), _col3 (type: int), _col7 (type: int), _col8 (type: varchar(50)), _col9 (type: double) - outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6 - Statistics: Num rows: 10 Data size: 2010 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: Num rows: 10 Data size: 2010 Basic stats: COMPLETE Column stats: COMPLETE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - Execution mode: vectorized, llap - LLAP IO: no inputs - Map 2 - Map Operator Tree: - TableScan - alias: z - filterExpr: cint is not null (type: boolean) - Statistics: Num rows: 5 Data size: 490 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator - predicate: cint is not null (type: boolean) - Statistics: Num rows: 4 Data size: 392 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: cint (type: int) - null sort order: z - sort order: + - Map-reduce partition columns: cint (type: int) - Statistics: Num rows: 4 Data size: 392 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: cvarchar (type: varchar(50)), cdouble (type: double) - Execution mode: vectorized, llap - LLAP IO: no inputs - - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - ListSink - -PREHOOK: query: SELECT * FROM t_hashjoin_big x JOIN t_hashjoin_small z ON (x.cint = z.cint) -PREHOOK: type: QUERY -PREHOOK: Input: default@t_hashjoin_big -PREHOOK: Input: default@t_hashjoin_small -#### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM t_hashjoin_big x JOIN t_hashjoin_small z ON (x.cint = z.cint) -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t_hashjoin_big -POSTHOOK: Input: default@t_hashjoin_small -#### A masked pattern was here #### -7 eight 4.2 3 7 two 1.5 -7 eight 
4.2 4 7 two 1.5 -7 eight 4.2 5 7 two 1.5 -5 two 3.0 1 5 two 4.2 -5 two 3.0 1 5 one 1.1 -5 two 3.0 1 5 one 1.1 -5 one 2.8 6 5 two 4.2 -5 one 2.8 6 5 one 1.1 -5 one 2.8 6 5 one 1.1 -5 one 2.8 7 5 two 4.2 -5 one 2.8 7 5 one 1.1 -5 one 2.8 7 5 one 1.1 -5 one 2.8 8 5 two 4.2 -5 one 2.8 8 5 one 1.1 -5 one 2.8 8 5 one 1.1 -PREHOOK: query: EXPLAIN -SELECT * FROM t_hashjoin_big x JOIN t_hashjoin_small z ON (x.cvarchar = z.cvarchar) -PREHOOK: type: QUERY -PREHOOK: Input: default@t_hashjoin_big -PREHOOK: Input: default@t_hashjoin_small -#### A masked pattern was here #### -POSTHOOK: query: EXPLAIN -SELECT * FROM t_hashjoin_big x JOIN t_hashjoin_small z ON (x.cvarchar = z.cvarchar) -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t_hashjoin_big -POSTHOOK: Input: default@t_hashjoin_small -#### A masked pattern was here #### -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-1 - Tez -#### A masked pattern was here #### - Edges: - Map 1 <- Map 2 (BROADCAST_EDGE) -#### A masked pattern was here #### - Vertices: - Map 1 - Map Operator Tree: - TableScan - alias: x - filterExpr: cvarchar is not null (type: boolean) - Statistics: Num rows: 9 Data size: 927 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator - predicate: cvarchar is not null (type: boolean) - Statistics: Num rows: 8 Data size: 824 Basic stats: COMPLETE Column stats: COMPLETE - Map Join Operator - condition map: - Inner Join 0 to 1 - keys: - 0 cvarchar (type: varchar(50)) - 1 cvarchar (type: varchar(50)) - outputColumnNames: _col0, _col1, _col2, _col3, _col7, _col8, _col9 - input vertices: - 1 Map 2 - Statistics: Num rows: 10 Data size: 2010 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: _col0 (type: int), _col1 (type: varchar(50)), _col2 (type: double), _col3 (type: int), _col7 (type: int), _col8 (type: varchar(50)), _col9 (type: double) - outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6 - Statistics: Num rows: 10 Data size: 2010 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: Num rows: 10 Data size: 2010 Basic stats: COMPLETE Column stats: COMPLETE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - Execution mode: vectorized, llap - LLAP IO: no inputs - Map 2 - Map Operator Tree: - TableScan - alias: z - filterExpr: cvarchar is not null (type: boolean) - Statistics: Num rows: 5 Data size: 490 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator - predicate: cvarchar is not null (type: boolean) - Statistics: Num rows: 4 Data size: 392 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: cvarchar (type: varchar(50)) - null sort order: z - sort order: + - Map-reduce partition columns: cvarchar (type: varchar(50)) - Statistics: Num rows: 4 Data size: 392 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: cint (type: int), cdouble (type: double) - Execution mode: vectorized, llap - LLAP IO: no inputs - - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - ListSink - -PREHOOK: query: SELECT * FROM t_hashjoin_big x JOIN t_hashjoin_small z ON (x.cvarchar = z.cvarchar) -PREHOOK: type: QUERY -PREHOOK: Input: default@t_hashjoin_big -PREHOOK: Input: default@t_hashjoin_small -#### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM 
t_hashjoin_big x JOIN t_hashjoin_small z ON (x.cvarchar = z.cvarchar) -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t_hashjoin_big -POSTHOOK: Input: default@t_hashjoin_small -#### A masked pattern was here #### -5 two 3.0 1 7 two 1.5 -5 two 3.0 1 5 two 4.2 -6 two 1.5 2 7 two 1.5 -6 two 1.5 2 5 two 4.2 -5 one 2.8 6 5 one 1.1 -5 one 2.8 6 5 one 1.1 -5 one 2.8 7 5 one 1.1 -5 one 2.8 7 5 one 1.1 -5 one 2.8 8 5 one 1.1 -5 one 2.8 8 5 one 1.1 -PREHOOK: query: EXPLAIN -SELECT * FROM t_hashjoin_big x JOIN t_hashjoin_small z ON (x.cint = z.cint AND x.cvarchar = z.cvarchar) -PREHOOK: type: QUERY -PREHOOK: Input: default@t_hashjoin_big -PREHOOK: Input: default@t_hashjoin_small -#### A masked pattern was here #### -POSTHOOK: query: EXPLAIN -SELECT * FROM t_hashjoin_big x JOIN t_hashjoin_small z ON (x.cint = z.cint AND x.cvarchar = z.cvarchar) -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t_hashjoin_big -POSTHOOK: Input: default@t_hashjoin_small -#### A masked pattern was here #### -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-1 - Tez -#### A masked pattern was here #### - Edges: - Map 1 <- Map 2 (BROADCAST_EDGE) -#### A masked pattern was here #### - Vertices: - Map 1 - Map Operator Tree: - TableScan - alias: x - filterExpr: (cint is not null and cvarchar is not null) (type: boolean) - Statistics: Num rows: 9 Data size: 927 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator - predicate: (cint is not null and cvarchar is not null) (type: boolean) - Statistics: Num rows: 7 Data size: 721 Basic stats: COMPLETE Column stats: COMPLETE - Map Join Operator - condition map: - Inner Join 0 to 1 - keys: - 0 cint (type: int), cvarchar (type: varchar(50)) - 1 cint (type: int), cvarchar (type: varchar(50)) - outputColumnNames: _col0, _col1, _col2, _col3, _col7, _col8, _col9 - input vertices: - 1 Map 2 - Statistics: Num rows: 7 Data size: 1407 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: _col0 (type: int), _col1 (type: varchar(50)), _col2 (type: double), _col3 (type: int), _col7 (type: int), _col8 (type: varchar(50)), _col9 (type: double) - outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6 - Statistics: Num rows: 7 Data size: 1407 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: Num rows: 7 Data size: 1407 Basic stats: COMPLETE Column stats: COMPLETE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - Execution mode: vectorized, llap - LLAP IO: no inputs - Map 2 - Map Operator Tree: - TableScan - alias: z - filterExpr: (cint is not null and cvarchar is not null) (type: boolean) - Statistics: Num rows: 5 Data size: 490 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator - predicate: (cint is not null and cvarchar is not null) (type: boolean) - Statistics: Num rows: 3 Data size: 294 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: cint (type: int), cvarchar (type: varchar(50)) - null sort order: zz - sort order: ++ - Map-reduce partition columns: cint (type: int), cvarchar (type: varchar(50)) - Statistics: Num rows: 3 Data size: 294 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: cdouble (type: double) - Execution mode: vectorized, llap - LLAP IO: no inputs - - Stage: Stage-0 - Fetch Operator - limit: -1 - 
Processor Tree: - ListSink - -PREHOOK: query: SELECT * FROM t_hashjoin_big x JOIN t_hashjoin_small z ON (x.cint = z.cint AND x.cvarchar = z.cvarchar) -PREHOOK: type: QUERY -PREHOOK: Input: default@t_hashjoin_big -PREHOOK: Input: default@t_hashjoin_small -#### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM t_hashjoin_big x JOIN t_hashjoin_small z ON (x.cint = z.cint AND x.cvarchar = z.cvarchar) -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t_hashjoin_big -POSTHOOK: Input: default@t_hashjoin_small -#### A masked pattern was here #### -5 two 3.0 1 5 two 4.2 -5 one 2.8 6 5 one 1.1 -5 one 2.8 6 5 one 1.1 -5 one 2.8 7 5 one 1.1 -5 one 2.8 7 5 one 1.1 -5 one 2.8 8 5 one 1.1 -5 one 2.8 8 5 one 1.1 -PREHOOK: query: EXPLAIN -SELECT * FROM t_hashjoin_big x LEFT OUTER JOIN t_hashjoin_small z ON (x.cint = z.cint AND x.cvarchar = z.cvarchar) -PREHOOK: type: QUERY -PREHOOK: Input: default@t_hashjoin_big -PREHOOK: Input: default@t_hashjoin_small -#### A masked pattern was here #### -POSTHOOK: query: EXPLAIN -SELECT * FROM t_hashjoin_big x LEFT OUTER JOIN t_hashjoin_small z ON (x.cint = z.cint AND x.cvarchar = z.cvarchar) -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t_hashjoin_big -POSTHOOK: Input: default@t_hashjoin_small -#### A masked pattern was here #### -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-1 - Tez -#### A masked pattern was here #### - Edges: - Map 1 <- Map 2 (BROADCAST_EDGE) -#### A masked pattern was here #### - Vertices: - Map 1 - Map Operator Tree: - TableScan - alias: x - Statistics: Num rows: 9 Data size: 927 Basic stats: COMPLETE Column stats: COMPLETE - Map Join Operator - condition map: - Left Outer Join 0 to 1 - keys: - 0 cint (type: int), cvarchar (type: varchar(50)) - 1 cint (type: int), cvarchar (type: varchar(50)) - outputColumnNames: _col0, _col1, _col2, _col3, _col7, _col8, _col9 - input vertices: - 1 Map 2 - Statistics: Num rows: 19 Data size: 3427 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: _col0 (type: int), _col1 (type: varchar(50)), _col2 (type: double), _col3 (type: int), _col7 (type: int), _col8 (type: varchar(50)), _col9 (type: double) - outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6 - Statistics: Num rows: 19 Data size: 3427 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: Num rows: 19 Data size: 3427 Basic stats: COMPLETE Column stats: COMPLETE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - Execution mode: vectorized, llap - LLAP IO: no inputs - Map 2 - Map Operator Tree: - TableScan - alias: z - Statistics: Num rows: 5 Data size: 490 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: cint (type: int), cvarchar (type: varchar(50)) - null sort order: zz - sort order: ++ - Map-reduce partition columns: cint (type: int), cvarchar (type: varchar(50)) - Statistics: Num rows: 5 Data size: 490 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: cdouble (type: double) - Execution mode: vectorized, llap - LLAP IO: no inputs - - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - ListSink - -PREHOOK: query: SELECT * FROM t_hashjoin_big x LEFT OUTER JOIN t_hashjoin_small z ON (x.cint = z.cint AND x.cvarchar = z.cvarchar) -PREHOOK: type: QUERY -PREHOOK: 
Input: default@t_hashjoin_big -PREHOOK: Input: default@t_hashjoin_small -#### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM t_hashjoin_big x LEFT OUTER JOIN t_hashjoin_small z ON (x.cint = z.cint AND x.cvarchar = z.cvarchar) -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t_hashjoin_big -POSTHOOK: Input: default@t_hashjoin_small -#### A masked pattern was here #### -5 two 3.0 1 5 two 4.2 -6 two 1.5 2 NULL NULL NULL -NULL NULL NULL NULL NULL NULL NULL -7 eight 4.2 3 NULL NULL NULL -7 eight 4.2 4 NULL NULL NULL -7 eight 4.2 5 NULL NULL NULL -5 one 2.8 6 5 one 1.1 -5 one 2.8 6 5 one 1.1 -5 one 2.8 7 5 one 1.1 -5 one 2.8 7 5 one 1.1 -5 one 2.8 8 5 one 1.1 -5 one 2.8 8 5 one 1.1 -PREHOOK: query: EXPLAIN -SELECT * FROM t_hashjoin_big x JOIN t_hashjoin_small z ON (x.cdouble = z.cdouble) -PREHOOK: type: QUERY -PREHOOK: Input: default@t_hashjoin_big -PREHOOK: Input: default@t_hashjoin_small -#### A masked pattern was here #### -POSTHOOK: query: EXPLAIN -SELECT * FROM t_hashjoin_big x JOIN t_hashjoin_small z ON (x.cdouble = z.cdouble) -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t_hashjoin_big -POSTHOOK: Input: default@t_hashjoin_small -#### A masked pattern was here #### -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-1 - Tez -#### A masked pattern was here #### - Edges: - Map 1 <- Map 2 (BROADCAST_EDGE) -#### A masked pattern was here #### - Vertices: - Map 1 - Map Operator Tree: - TableScan - alias: x - filterExpr: cdouble is not null (type: boolean) - Statistics: Num rows: 9 Data size: 927 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator - predicate: cdouble is not null (type: boolean) - Statistics: Num rows: 8 Data size: 824 Basic stats: COMPLETE Column stats: COMPLETE - Map Join Operator - condition map: - Inner Join 0 to 1 - keys: - 0 cdouble (type: double) - 1 cdouble (type: double) - outputColumnNames: _col0, _col1, _col2, _col3, _col7, _col8, _col9 - input vertices: - 1 Map 2 - Statistics: Num rows: 8 Data size: 1608 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: _col0 (type: int), _col1 (type: varchar(50)), _col2 (type: double), _col3 (type: int), _col7 (type: int), _col8 (type: varchar(50)), _col9 (type: double) - outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6 - Statistics: Num rows: 8 Data size: 1608 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: Num rows: 8 Data size: 1608 Basic stats: COMPLETE Column stats: COMPLETE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - Execution mode: vectorized, llap - LLAP IO: no inputs - Map 2 - Map Operator Tree: - TableScan - alias: z - filterExpr: cdouble is not null (type: boolean) - Statistics: Num rows: 5 Data size: 490 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator - predicate: cdouble is not null (type: boolean) - Statistics: Num rows: 4 Data size: 392 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: cdouble (type: double) - null sort order: z - sort order: + - Map-reduce partition columns: cdouble (type: double) - Statistics: Num rows: 4 Data size: 392 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: cint (type: int), cvarchar (type: varchar(50)) - Execution mode: vectorized, llap - LLAP IO: no inputs - - 
Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - ListSink - -PREHOOK: query: SELECT * FROM t_hashjoin_big x JOIN t_hashjoin_small z ON (x.cdouble = z.cdouble) -PREHOOK: type: QUERY -PREHOOK: Input: default@t_hashjoin_big -PREHOOK: Input: default@t_hashjoin_small -#### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM t_hashjoin_big x JOIN t_hashjoin_small z ON (x.cdouble = z.cdouble) -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t_hashjoin_big -POSTHOOK: Input: default@t_hashjoin_small -#### A masked pattern was here #### -6 two 1.5 2 7 two 1.5 -7 eight 4.2 3 5 two 4.2 -7 eight 4.2 4 5 two 4.2 -7 eight 4.2 5 5 two 4.2 -PREHOOK: query: DROP TABLE t_hashjoin_big -PREHOOK: type: DROPTABLE -PREHOOK: Input: default@t_hashjoin_big -PREHOOK: Output: default@t_hashjoin_big -POSTHOOK: query: DROP TABLE t_hashjoin_big -POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@t_hashjoin_big -POSTHOOK: Output: default@t_hashjoin_big -PREHOOK: query: DROP TABLE t_hashjoin_small -PREHOOK: type: DROPTABLE -PREHOOK: Input: default@t_hashjoin_small -PREHOOK: Output: default@t_hashjoin_small -POSTHOOK: query: DROP TABLE t_hashjoin_small -POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@t_hashjoin_small -POSTHOOK: Output: default@t_hashjoin_small diff --git ql/src/test/results/clientpositive/llap/intersect_all_rj.q.out ql/src/test/results/clientpositive/llap/intersect_all_rj.q.out deleted file mode 100644 index c47452fabd..0000000000 --- ql/src/test/results/clientpositive/llap/intersect_all_rj.q.out +++ /dev/null @@ -1,256 +0,0 @@ -PREHOOK: query: CREATE TABLE table_16 ( -timestamp_col_19 timestamp, -timestamp_col_29 timestamp -) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@table_16 -POSTHOOK: query: CREATE TABLE table_16 ( -timestamp_col_19 timestamp, -timestamp_col_29 timestamp -) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@table_16 -PREHOOK: query: INSERT INTO table_16(timestamp_col_19, timestamp_col_29) VALUES -('2018-01-10 15:03:55.0', '2018-01-10 15:04:55.0'), -('2018-01-10 15:03:55.0', '2018-01-10 15:04:55.0'), -('2018-02-10 07:12:55.0', '2018-02-10 07:12:55.0'), -('2020-01-01 00:00:01.0', '2020-01-01 00:00:02.0') -PREHOOK: type: QUERY -PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@table_16 -POSTHOOK: query: INSERT INTO table_16(timestamp_col_19, timestamp_col_29) VALUES -('2018-01-10 15:03:55.0', '2018-01-10 15:04:55.0'), -('2018-01-10 15:03:55.0', '2018-01-10 15:04:55.0'), -('2018-02-10 07:12:55.0', '2018-02-10 07:12:55.0'), -('2020-01-01 00:00:01.0', '2020-01-01 00:00:02.0') -POSTHOOK: type: QUERY -POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@table_16 -POSTHOOK: Lineage: table_16.timestamp_col_19 SCRIPT [] -POSTHOOK: Lineage: table_16.timestamp_col_29 SCRIPT [] -PREHOOK: query: CREATE TABLE table_7 ( -int_col_10 int, -bigint_col_3 bigint -) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@table_7 -POSTHOOK: query: CREATE TABLE table_7 ( -int_col_10 int, -bigint_col_3 bigint -) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@table_7 -PREHOOK: query: INSERT INTO table_7(int_col_10, bigint_col_3) VALUES -(3, 200), -(3, 100), -(2, 250), -(2, 280), -(2, 50) -PREHOOK: type: QUERY -PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@table_7 -POSTHOOK: query: INSERT INTO table_7(int_col_10, bigint_col_3) VALUES -(3, 200), -(3, 100), 
-(2, 250), -(2, 280), -(2, 50) -POSTHOOK: type: QUERY -POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@table_7 -POSTHOOK: Lineage: table_7.bigint_col_3 SCRIPT [] -POSTHOOK: Lineage: table_7.int_col_10 SCRIPT [] -PREHOOK: query: CREATE TABLE table_10 ( -boolean_col_16 boolean, -timestamp_col_5 timestamp, -timestamp_col_15 timestamp, -timestamp_col_30 timestamp, -int_col_18 int -) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@table_10 -POSTHOOK: query: CREATE TABLE table_10 ( -boolean_col_16 boolean, -timestamp_col_5 timestamp, -timestamp_col_15 timestamp, -timestamp_col_30 timestamp, -int_col_18 int -) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@table_10 -PREHOOK: query: INSERT INTO table_10(boolean_col_16, timestamp_col_5, timestamp_col_15, timestamp_col_30, int_col_18) VALUES -(true, '2018-01-10 15:03:55.0', '2018-01-10 15:03:55.0', '2018-01-10 15:03:55.0', 11), -(true, '2018-01-10 15:03:55.0', '2018-01-10 15:03:55.0', '2018-01-10 15:03:55.0', 11), -(true, '2018-01-10 15:03:55.0', '2018-01-10 15:03:55.0', '2018-01-10 15:03:55.0', 11), -(true, '2018-02-10 07:12:55.0', '2018-02-10 07:12:55.0', '2018-02-10 07:12:55.0', 15), -(true, '2018-02-10 07:12:55.0', '2018-02-10 07:12:55.0', '2018-02-10 07:12:55.0', 15), -(true, '2018-03-10 03:05:01.0', '2018-03-10 03:05:01.0', '2018-03-10 03:05:01.0', 18) -PREHOOK: type: QUERY -PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@table_10 -POSTHOOK: query: INSERT INTO table_10(boolean_col_16, timestamp_col_5, timestamp_col_15, timestamp_col_30, int_col_18) VALUES -(true, '2018-01-10 15:03:55.0', '2018-01-10 15:03:55.0', '2018-01-10 15:03:55.0', 11), -(true, '2018-01-10 15:03:55.0', '2018-01-10 15:03:55.0', '2018-01-10 15:03:55.0', 11), -(true, '2018-01-10 15:03:55.0', '2018-01-10 15:03:55.0', '2018-01-10 15:03:55.0', 11), -(true, '2018-02-10 07:12:55.0', '2018-02-10 07:12:55.0', '2018-02-10 07:12:55.0', 15), -(true, '2018-02-10 07:12:55.0', '2018-02-10 07:12:55.0', '2018-02-10 07:12:55.0', 15), -(true, '2018-03-10 03:05:01.0', '2018-03-10 03:05:01.0', '2018-03-10 03:05:01.0', 18) -POSTHOOK: type: QUERY -POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@table_10 -POSTHOOK: Lineage: table_10.boolean_col_16 SCRIPT [] -POSTHOOK: Lineage: table_10.int_col_18 SCRIPT [] -POSTHOOK: Lineage: table_10.timestamp_col_15 SCRIPT [] -POSTHOOK: Lineage: table_10.timestamp_col_30 SCRIPT [] -POSTHOOK: Lineage: table_10.timestamp_col_5 SCRIPT [] -Warning: Shuffle Join MERGEJOIN[48][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 2' is a cross product -PREHOOK: query: explain cbo -SELECT - DISTINCT COALESCE(a4.timestamp_col_15, IF(a4.boolean_col_16, a4.timestamp_col_30, a4.timestamp_col_5)) AS timestamp_col -FROM table_7 a3 -RIGHT JOIN table_10 a4 -WHERE (a3.bigint_col_3) >= (a4.int_col_18) -INTERSECT ALL -SELECT - COALESCE(LEAST( - COALESCE(a1.timestamp_col_19, CAST('2010-03-29 00:00:00' AS TIMESTAMP)), - COALESCE(a1.timestamp_col_29, CAST('2014-08-16 00:00:00' AS TIMESTAMP)) - ), - GREATEST(COALESCE(a1.timestamp_col_19, CAST('2013-07-01 00:00:00' AS TIMESTAMP)), - COALESCE(a1.timestamp_col_29, CAST('2028-06-18 00:00:00' AS TIMESTAMP))) - ) AS timestamp_col -FROM table_16 a1 - GROUP BY COALESCE(LEAST( - COALESCE(a1.timestamp_col_19, CAST('2010-03-29 00:00:00' AS TIMESTAMP)), - COALESCE(a1.timestamp_col_29, CAST('2014-08-16 00:00:00' AS TIMESTAMP)) - ), - GREATEST( - COALESCE(a1.timestamp_col_19, CAST('2013-07-01 
00:00:00' AS TIMESTAMP)), - COALESCE(a1.timestamp_col_29, CAST('2028-06-18 00:00:00' AS TIMESTAMP))) - ) -PREHOOK: type: QUERY -PREHOOK: Input: default@table_10 -PREHOOK: Input: default@table_16 -PREHOOK: Input: default@table_7 -#### A masked pattern was here #### -POSTHOOK: query: explain cbo -SELECT - DISTINCT COALESCE(a4.timestamp_col_15, IF(a4.boolean_col_16, a4.timestamp_col_30, a4.timestamp_col_5)) AS timestamp_col -FROM table_7 a3 -RIGHT JOIN table_10 a4 -WHERE (a3.bigint_col_3) >= (a4.int_col_18) -INTERSECT ALL -SELECT - COALESCE(LEAST( - COALESCE(a1.timestamp_col_19, CAST('2010-03-29 00:00:00' AS TIMESTAMP)), - COALESCE(a1.timestamp_col_29, CAST('2014-08-16 00:00:00' AS TIMESTAMP)) - ), - GREATEST(COALESCE(a1.timestamp_col_19, CAST('2013-07-01 00:00:00' AS TIMESTAMP)), - COALESCE(a1.timestamp_col_29, CAST('2028-06-18 00:00:00' AS TIMESTAMP))) - ) AS timestamp_col -FROM table_16 a1 - GROUP BY COALESCE(LEAST( - COALESCE(a1.timestamp_col_19, CAST('2010-03-29 00:00:00' AS TIMESTAMP)), - COALESCE(a1.timestamp_col_29, CAST('2014-08-16 00:00:00' AS TIMESTAMP)) - ), - GREATEST( - COALESCE(a1.timestamp_col_19, CAST('2013-07-01 00:00:00' AS TIMESTAMP)), - COALESCE(a1.timestamp_col_29, CAST('2028-06-18 00:00:00' AS TIMESTAMP))) - ) -POSTHOOK: type: QUERY -POSTHOOK: Input: default@table_10 -POSTHOOK: Input: default@table_16 -POSTHOOK: Input: default@table_7 -#### A masked pattern was here #### -CBO PLAN: -HiveProject($f0=[$1]) - HiveTableFunctionScan(invocation=[replicate_rows($0, $1)], rowType=[RecordType(BIGINT $f0, TIMESTAMP(9) $f1)]) - HiveProject($f0=[$2], $f1=[$0]) - HiveFilter(condition=[=($1, 2)]) - HiveAggregate(group=[{0}], agg#0=[count($1)], agg#1=[min($1)]) - HiveProject($f0=[$0], $f1=[$1]) - HiveUnion(all=[true]) - HiveProject($f0=[$0], $f1=[$1]) - HiveAggregate(group=[{0}], agg#0=[count()]) - HiveProject($f0=[$0]) - HiveAggregate(group=[{0}]) - HiveProject($f0=[CASE(IS NOT NULL($3), $3, if($1, $4, $2))]) - HiveJoin(condition=[>=($0, $5)], joinType=[inner], algorithm=[none], cost=[not available]) - HiveProject(bigint_col_3=[$1]) - HiveFilter(condition=[IS NOT NULL($1)]) - HiveTableScan(table=[[default, table_7]], table:alias=[a3]) - HiveProject(boolean_col_16=[$0], timestamp_col_5=[$1], timestamp_col_15=[$2], timestamp_col_30=[$3], CAST=[CAST($4):BIGINT]) - HiveFilter(condition=[IS NOT NULL(CAST($4):BIGINT)]) - HiveTableScan(table=[[default, table_10]], table:alias=[a4]) - HiveProject($f0=[$0], $f1=[$1]) - HiveAggregate(group=[{0}], agg#0=[count()]) - HiveProject($f0=[$0]) - HiveAggregate(group=[{0}]) - HiveProject($f0=[CASE(IS NOT NULL(least(CASE(IS NOT NULL($0), $0, 2010-03-29 00:00:00:TIMESTAMP(9)), CASE(IS NOT NULL($1), $1, 2014-08-16 00:00:00:TIMESTAMP(9)))), least(CASE(IS NOT NULL($0), $0, 2010-03-29 00:00:00:TIMESTAMP(9)), CASE(IS NOT NULL($1), $1, 2014-08-16 00:00:00:TIMESTAMP(9))), greatest(CASE(IS NOT NULL($0), $0, 2013-07-01 00:00:00:TIMESTAMP(9)), CASE(IS NOT NULL($1), $1, 2028-06-18 00:00:00:TIMESTAMP(9))))]) - HiveTableScan(table=[[default, table_16]], table:alias=[a1]) - -Warning: Shuffle Join MERGEJOIN[48][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 2' is a cross product -PREHOOK: query: SELECT - DISTINCT COALESCE(a4.timestamp_col_15, IF(a4.boolean_col_16, a4.timestamp_col_30, a4.timestamp_col_5)) AS timestamp_col -FROM table_7 a3 -RIGHT JOIN table_10 a4 -WHERE (a3.bigint_col_3) >= (a4.int_col_18) -INTERSECT ALL -SELECT - COALESCE(LEAST( - COALESCE(a1.timestamp_col_19, CAST('2010-03-29 00:00:00' AS TIMESTAMP)), - COALESCE(a1.timestamp_col_29, CAST('2014-08-16 
00:00:00' AS TIMESTAMP)) - ), - GREATEST(COALESCE(a1.timestamp_col_19, CAST('2013-07-01 00:00:00' AS TIMESTAMP)), - COALESCE(a1.timestamp_col_29, CAST('2028-06-18 00:00:00' AS TIMESTAMP))) - ) AS timestamp_col -FROM table_16 a1 - GROUP BY COALESCE(LEAST( - COALESCE(a1.timestamp_col_19, CAST('2010-03-29 00:00:00' AS TIMESTAMP)), - COALESCE(a1.timestamp_col_29, CAST('2014-08-16 00:00:00' AS TIMESTAMP)) - ), - GREATEST( - COALESCE(a1.timestamp_col_19, CAST('2013-07-01 00:00:00' AS TIMESTAMP)), - COALESCE(a1.timestamp_col_29, CAST('2028-06-18 00:00:00' AS TIMESTAMP))) - ) -PREHOOK: type: QUERY -PREHOOK: Input: default@table_10 -PREHOOK: Input: default@table_16 -PREHOOK: Input: default@table_7 -#### A masked pattern was here #### -POSTHOOK: query: SELECT - DISTINCT COALESCE(a4.timestamp_col_15, IF(a4.boolean_col_16, a4.timestamp_col_30, a4.timestamp_col_5)) AS timestamp_col -FROM table_7 a3 -RIGHT JOIN table_10 a4 -WHERE (a3.bigint_col_3) >= (a4.int_col_18) -INTERSECT ALL -SELECT - COALESCE(LEAST( - COALESCE(a1.timestamp_col_19, CAST('2010-03-29 00:00:00' AS TIMESTAMP)), - COALESCE(a1.timestamp_col_29, CAST('2014-08-16 00:00:00' AS TIMESTAMP)) - ), - GREATEST(COALESCE(a1.timestamp_col_19, CAST('2013-07-01 00:00:00' AS TIMESTAMP)), - COALESCE(a1.timestamp_col_29, CAST('2028-06-18 00:00:00' AS TIMESTAMP))) - ) AS timestamp_col -FROM table_16 a1 - GROUP BY COALESCE(LEAST( - COALESCE(a1.timestamp_col_19, CAST('2010-03-29 00:00:00' AS TIMESTAMP)), - COALESCE(a1.timestamp_col_29, CAST('2014-08-16 00:00:00' AS TIMESTAMP)) - ), - GREATEST( - COALESCE(a1.timestamp_col_19, CAST('2013-07-01 00:00:00' AS TIMESTAMP)), - COALESCE(a1.timestamp_col_29, CAST('2028-06-18 00:00:00' AS TIMESTAMP))) - ) -POSTHOOK: type: QUERY -POSTHOOK: Input: default@table_10 -POSTHOOK: Input: default@table_16 -POSTHOOK: Input: default@table_7 -#### A masked pattern was here #### -2018-02-10 07:12:55 -2018-01-10 15:03:55 diff --git ql/src/test/results/clientpositive/llap/load_dyn_part3.q.out ql/src/test/results/clientpositive/llap/load_dyn_part3.q.out deleted file mode 100644 index ac34fe4a9d..0000000000 --- ql/src/test/results/clientpositive/llap/load_dyn_part3.q.out +++ /dev/null @@ -1,2190 +0,0 @@ -PREHOOK: query: show partitions srcpart -PREHOOK: type: SHOWPARTITIONS -PREHOOK: Input: default@srcpart -POSTHOOK: query: show partitions srcpart -POSTHOOK: type: SHOWPARTITIONS -POSTHOOK: Input: default@srcpart -ds=2008-04-08/hr=11 -ds=2008-04-08/hr=12 -ds=2008-04-09/hr=11 -ds=2008-04-09/hr=12 -PREHOOK: query: create table if not exists nzhang_part3 like srcpart -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@nzhang_part3 -POSTHOOK: query: create table if not exists nzhang_part3 like srcpart -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@nzhang_part3 -PREHOOK: query: describe extended nzhang_part3 -PREHOOK: type: DESCTABLE -PREHOOK: Input: default@nzhang_part3 -POSTHOOK: query: describe extended nzhang_part3 -POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@nzhang_part3 -key string default -value string default -ds string -hr string - -# Partition Information -# col_name data_type comment -ds string -hr string - -#### A masked pattern was here #### -PREHOOK: query: explain -insert overwrite table nzhang_part3 partition (ds, hr) select key, value, ds, hr from srcpart where ds is not null and hr is not null -PREHOOK: type: QUERY -PREHOOK: Input: default@srcpart -PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 -PREHOOK: Input: 
default@srcpart@ds=2008-04-08/hr=12 -PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 -PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 -PREHOOK: Output: default@nzhang_part3 -POSTHOOK: query: explain -insert overwrite table nzhang_part3 partition (ds, hr) select key, value, ds, hr from srcpart where ds is not null and hr is not null -POSTHOOK: type: QUERY -POSTHOOK: Input: default@srcpart -POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 -POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 -POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 -POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-2 depends on stages: Stage-1 - Stage-0 depends on stages: Stage-2 - Stage-3 depends on stages: Stage-0 - -STAGE PLANS: - Stage: Stage-1 - Tez -#### A masked pattern was here #### - Edges: - Reducer 2 <- Map 1 (SIMPLE_EDGE) -#### A masked pattern was here #### - Vertices: - Map 1 - Map Operator Tree: - TableScan - alias: srcpart - filterExpr: (ds is not null and hr is not null) (type: boolean) - Statistics: Num rows: 2000 Data size: 1092000 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: key (type: string), value (type: string), ds (type: string), hr (type: string) - outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 2000 Data size: 1092000 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: Num rows: 2000 Data size: 1092000 Basic stats: COMPLETE Column stats: COMPLETE - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.nzhang_part3 - Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: string) - outputColumnNames: key, value, ds, hr - Statistics: Num rows: 2000 Data size: 1092000 Basic stats: COMPLETE Column stats: COMPLETE - Group By Operator - aggregations: compute_stats(key, 'hll'), compute_stats(value, 'hll') - keys: ds (type: string), hr (type: string) - mode: hash - outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 4 Data size: 4992 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 (type: string), _col1 (type: string) - sort order: ++ - Map-reduce partition columns: _col0 (type: string), _col1 (type: string) - Statistics: Num rows: 4 Data size: 4992 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col2 (type: struct), _col3 (type: struct) - Execution mode: llap - LLAP IO: no inputs - Reducer 2 - Execution mode: llap - Reduce Operator Tree: - Group By Operator - aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1) - keys: KEY._col0 (type: string), KEY._col1 (type: string) - mode: mergepartial - outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 4 Data size: 4992 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: _col2 (type: struct), _col3 (type: struct), _col0 (type: string), _col1 (type: string) - outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 4 Data size: 4992 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: Num rows: 4 Data size: 4992 Basic stats: COMPLETE Column stats: COMPLETE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - Stage: Stage-2 - Dependency Collection - - Stage: Stage-0 - Move Operator - tables: - partition: - ds - hr - replace: true - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.nzhang_part3 - - Stage: Stage-3 - Stats Work - Basic Stats Work: - Column Stats Desc: - Columns: key, value - Column Types: string, string - Table: default.nzhang_part3 - -PREHOOK: query: insert overwrite table nzhang_part3 partition (ds, hr) select key, value, ds, hr from srcpart where ds is not null and hr is not null -PREHOOK: type: QUERY -PREHOOK: Input: default@srcpart -PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 -PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 -PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 -PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 -PREHOOK: Output: default@nzhang_part3 -POSTHOOK: query: insert overwrite table nzhang_part3 partition (ds, hr) select key, value, ds, hr from srcpart where ds is not null and hr is not null -POSTHOOK: type: QUERY -POSTHOOK: Input: default@srcpart -POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 -POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 -POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 -POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 -POSTHOOK: Output: default@nzhang_part3@ds=2008-04-08/hr=11 -POSTHOOK: Output: default@nzhang_part3@ds=2008-04-08/hr=12 -POSTHOOK: Output: default@nzhang_part3@ds=2008-04-09/hr=11 -POSTHOOK: Output: default@nzhang_part3@ds=2008-04-09/hr=12 -POSTHOOK: Lineage: nzhang_part3 PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: nzhang_part3 PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: nzhang_part3 PARTITION(ds=2008-04-08,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: nzhang_part3 PARTITION(ds=2008-04-08,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: nzhang_part3 PARTITION(ds=2008-04-09,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: nzhang_part3 PARTITION(ds=2008-04-09,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: nzhang_part3 PARTITION(ds=2008-04-09,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: nzhang_part3 PARTITION(ds=2008-04-09,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: select * from nzhang_part3 where ds is not null and hr is not null -PREHOOK: type: QUERY -PREHOOK: Input: default@nzhang_part3 -PREHOOK: Input: default@nzhang_part3@ds=2008-04-08/hr=11 -PREHOOK: Input: default@nzhang_part3@ds=2008-04-08/hr=12 -PREHOOK: Input: default@nzhang_part3@ds=2008-04-09/hr=11 -PREHOOK: Input: default@nzhang_part3@ds=2008-04-09/hr=12 -#### A masked pattern was here #### -POSTHOOK: query: select * from nzhang_part3 where ds is not null and hr is not null -POSTHOOK: type: QUERY -POSTHOOK: Input: default@nzhang_part3 -POSTHOOK: Input: 
default@nzhang_part3@ds=2008-04-08/hr=11 -POSTHOOK: Input: default@nzhang_part3@ds=2008-04-08/hr=12 -POSTHOOK: Input: default@nzhang_part3@ds=2008-04-09/hr=11 -POSTHOOK: Input: default@nzhang_part3@ds=2008-04-09/hr=12 -#### A masked pattern was here #### -0 val_0 2008-04-08 11 -0 val_0 2008-04-08 11 -0 val_0 2008-04-08 11 -0 val_0 2008-04-08 12 -0 val_0 2008-04-08 12 -0 val_0 2008-04-08 12 -0 val_0 2008-04-09 11 -0 val_0 2008-04-09 11 -0 val_0 2008-04-09 11 -0 val_0 2008-04-09 12 -0 val_0 2008-04-09 12 -0 val_0 2008-04-09 12 -10 val_10 2008-04-08 11 -10 val_10 2008-04-08 12 -10 val_10 2008-04-09 11 -10 val_10 2008-04-09 12 -100 val_100 2008-04-08 11 -100 val_100 2008-04-08 11 -100 val_100 2008-04-08 12 -100 val_100 2008-04-08 12 -100 val_100 2008-04-09 11 -100 val_100 2008-04-09 11 -100 val_100 2008-04-09 12 -100 val_100 2008-04-09 12 -103 val_103 2008-04-08 11 -103 val_103 2008-04-08 11 -103 val_103 2008-04-08 12 -103 val_103 2008-04-08 12 -103 val_103 2008-04-09 11 -103 val_103 2008-04-09 11 -103 val_103 2008-04-09 12 -103 val_103 2008-04-09 12 -104 val_104 2008-04-08 11 -104 val_104 2008-04-08 11 -104 val_104 2008-04-08 12 -104 val_104 2008-04-08 12 -104 val_104 2008-04-09 11 -104 val_104 2008-04-09 11 -104 val_104 2008-04-09 12 -104 val_104 2008-04-09 12 -105 val_105 2008-04-08 11 -105 val_105 2008-04-08 12 -105 val_105 2008-04-09 11 -105 val_105 2008-04-09 12 -11 val_11 2008-04-08 11 -11 val_11 2008-04-08 12 -11 val_11 2008-04-09 11 -11 val_11 2008-04-09 12 -111 val_111 2008-04-08 11 -111 val_111 2008-04-08 12 -111 val_111 2008-04-09 11 -111 val_111 2008-04-09 12 -113 val_113 2008-04-08 11 -113 val_113 2008-04-08 11 -113 val_113 2008-04-08 12 -113 val_113 2008-04-08 12 -113 val_113 2008-04-09 11 -113 val_113 2008-04-09 11 -113 val_113 2008-04-09 12 -113 val_113 2008-04-09 12 -114 val_114 2008-04-08 11 -114 val_114 2008-04-08 12 -114 val_114 2008-04-09 11 -114 val_114 2008-04-09 12 -116 val_116 2008-04-08 11 -116 val_116 2008-04-08 12 -116 val_116 2008-04-09 11 -116 val_116 2008-04-09 12 -118 val_118 2008-04-08 11 -118 val_118 2008-04-08 11 -118 val_118 2008-04-08 12 -118 val_118 2008-04-08 12 -118 val_118 2008-04-09 11 -118 val_118 2008-04-09 11 -118 val_118 2008-04-09 12 -118 val_118 2008-04-09 12 -119 val_119 2008-04-08 11 -119 val_119 2008-04-08 11 -119 val_119 2008-04-08 11 -119 val_119 2008-04-08 12 -119 val_119 2008-04-08 12 -119 val_119 2008-04-08 12 -119 val_119 2008-04-09 11 -119 val_119 2008-04-09 11 -119 val_119 2008-04-09 11 -119 val_119 2008-04-09 12 -119 val_119 2008-04-09 12 -119 val_119 2008-04-09 12 -12 val_12 2008-04-08 11 -12 val_12 2008-04-08 11 -12 val_12 2008-04-08 12 -12 val_12 2008-04-08 12 -12 val_12 2008-04-09 11 -12 val_12 2008-04-09 11 -12 val_12 2008-04-09 12 -12 val_12 2008-04-09 12 -120 val_120 2008-04-08 11 -120 val_120 2008-04-08 11 -120 val_120 2008-04-08 12 -120 val_120 2008-04-08 12 -120 val_120 2008-04-09 11 -120 val_120 2008-04-09 11 -120 val_120 2008-04-09 12 -120 val_120 2008-04-09 12 -125 val_125 2008-04-08 11 -125 val_125 2008-04-08 11 -125 val_125 2008-04-08 12 -125 val_125 2008-04-08 12 -125 val_125 2008-04-09 11 -125 val_125 2008-04-09 11 -125 val_125 2008-04-09 12 -125 val_125 2008-04-09 12 -126 val_126 2008-04-08 11 -126 val_126 2008-04-08 12 -126 val_126 2008-04-09 11 -126 val_126 2008-04-09 12 -128 val_128 2008-04-08 11 -128 val_128 2008-04-08 11 -128 val_128 2008-04-08 11 -128 val_128 2008-04-08 12 -128 val_128 2008-04-08 12 -128 val_128 2008-04-08 12 -128 val_128 2008-04-09 11 -128 val_128 2008-04-09 11 -128 val_128 2008-04-09 11 -128 
val_128 2008-04-09 12 -128 val_128 2008-04-09 12 -128 val_128 2008-04-09 12 -129 val_129 2008-04-08 11 -129 val_129 2008-04-08 11 -129 val_129 2008-04-08 12 -129 val_129 2008-04-08 12 -129 val_129 2008-04-09 11 -129 val_129 2008-04-09 11 -129 val_129 2008-04-09 12 -129 val_129 2008-04-09 12 -131 val_131 2008-04-08 11 -131 val_131 2008-04-08 12 -131 val_131 2008-04-09 11 -131 val_131 2008-04-09 12 -133 val_133 2008-04-08 11 -133 val_133 2008-04-08 12 -133 val_133 2008-04-09 11 -133 val_133 2008-04-09 12 -134 val_134 2008-04-08 11 -134 val_134 2008-04-08 11 -134 val_134 2008-04-08 12 -134 val_134 2008-04-08 12 -134 val_134 2008-04-09 11 -134 val_134 2008-04-09 11 -134 val_134 2008-04-09 12 -134 val_134 2008-04-09 12 -136 val_136 2008-04-08 11 -136 val_136 2008-04-08 12 -136 val_136 2008-04-09 11 -136 val_136 2008-04-09 12 -137 val_137 2008-04-08 11 -137 val_137 2008-04-08 11 -137 val_137 2008-04-08 12 -137 val_137 2008-04-08 12 -137 val_137 2008-04-09 11 -137 val_137 2008-04-09 11 -137 val_137 2008-04-09 12 -137 val_137 2008-04-09 12 -138 val_138 2008-04-08 11 -138 val_138 2008-04-08 11 -138 val_138 2008-04-08 11 -138 val_138 2008-04-08 11 -138 val_138 2008-04-08 12 -138 val_138 2008-04-08 12 -138 val_138 2008-04-08 12 -138 val_138 2008-04-08 12 -138 val_138 2008-04-09 11 -138 val_138 2008-04-09 11 -138 val_138 2008-04-09 11 -138 val_138 2008-04-09 11 -138 val_138 2008-04-09 12 -138 val_138 2008-04-09 12 -138 val_138 2008-04-09 12 -138 val_138 2008-04-09 12 -143 val_143 2008-04-08 11 -143 val_143 2008-04-08 12 -143 val_143 2008-04-09 11 -143 val_143 2008-04-09 12 -145 val_145 2008-04-08 11 -145 val_145 2008-04-08 12 -145 val_145 2008-04-09 11 -145 val_145 2008-04-09 12 -146 val_146 2008-04-08 11 -146 val_146 2008-04-08 11 -146 val_146 2008-04-08 12 -146 val_146 2008-04-08 12 -146 val_146 2008-04-09 11 -146 val_146 2008-04-09 11 -146 val_146 2008-04-09 12 -146 val_146 2008-04-09 12 -149 val_149 2008-04-08 11 -149 val_149 2008-04-08 11 -149 val_149 2008-04-08 12 -149 val_149 2008-04-08 12 -149 val_149 2008-04-09 11 -149 val_149 2008-04-09 11 -149 val_149 2008-04-09 12 -149 val_149 2008-04-09 12 -15 val_15 2008-04-08 11 -15 val_15 2008-04-08 11 -15 val_15 2008-04-08 12 -15 val_15 2008-04-08 12 -15 val_15 2008-04-09 11 -15 val_15 2008-04-09 11 -15 val_15 2008-04-09 12 -15 val_15 2008-04-09 12 -150 val_150 2008-04-08 11 -150 val_150 2008-04-08 12 -150 val_150 2008-04-09 11 -150 val_150 2008-04-09 12 -152 val_152 2008-04-08 11 -152 val_152 2008-04-08 11 -152 val_152 2008-04-08 12 -152 val_152 2008-04-08 12 -152 val_152 2008-04-09 11 -152 val_152 2008-04-09 11 -152 val_152 2008-04-09 12 -152 val_152 2008-04-09 12 -153 val_153 2008-04-08 11 -153 val_153 2008-04-08 12 -153 val_153 2008-04-09 11 -153 val_153 2008-04-09 12 -155 val_155 2008-04-08 11 -155 val_155 2008-04-08 12 -155 val_155 2008-04-09 11 -155 val_155 2008-04-09 12 -156 val_156 2008-04-08 11 -156 val_156 2008-04-08 12 -156 val_156 2008-04-09 11 -156 val_156 2008-04-09 12 -157 val_157 2008-04-08 11 -157 val_157 2008-04-08 12 -157 val_157 2008-04-09 11 -157 val_157 2008-04-09 12 -158 val_158 2008-04-08 11 -158 val_158 2008-04-08 12 -158 val_158 2008-04-09 11 -158 val_158 2008-04-09 12 -160 val_160 2008-04-08 11 -160 val_160 2008-04-08 12 -160 val_160 2008-04-09 11 -160 val_160 2008-04-09 12 -162 val_162 2008-04-08 11 -162 val_162 2008-04-08 12 -162 val_162 2008-04-09 11 -162 val_162 2008-04-09 12 -163 val_163 2008-04-08 11 -163 val_163 2008-04-08 12 -163 val_163 2008-04-09 11 -163 val_163 2008-04-09 12 -164 val_164 2008-04-08 11 -164 
val_164 2008-04-08 11 -164 val_164 2008-04-08 12 -164 val_164 2008-04-08 12 -164 val_164 2008-04-09 11 -164 val_164 2008-04-09 11 -164 val_164 2008-04-09 12 -164 val_164 2008-04-09 12 -165 val_165 2008-04-08 11 -165 val_165 2008-04-08 11 -165 val_165 2008-04-08 12 -165 val_165 2008-04-08 12 -165 val_165 2008-04-09 11 -165 val_165 2008-04-09 11 -165 val_165 2008-04-09 12 -165 val_165 2008-04-09 12 -166 val_166 2008-04-08 11 -166 val_166 2008-04-08 12 -166 val_166 2008-04-09 11 -166 val_166 2008-04-09 12 -167 val_167 2008-04-08 11 -167 val_167 2008-04-08 11 -167 val_167 2008-04-08 11 -167 val_167 2008-04-08 12 -167 val_167 2008-04-08 12 -167 val_167 2008-04-08 12 -167 val_167 2008-04-09 11 -167 val_167 2008-04-09 11 -167 val_167 2008-04-09 11 -167 val_167 2008-04-09 12 -167 val_167 2008-04-09 12 -167 val_167 2008-04-09 12 -168 val_168 2008-04-08 11 -168 val_168 2008-04-08 12 -168 val_168 2008-04-09 11 -168 val_168 2008-04-09 12 -169 val_169 2008-04-08 11 -169 val_169 2008-04-08 11 -169 val_169 2008-04-08 11 -169 val_169 2008-04-08 11 -169 val_169 2008-04-08 12 -169 val_169 2008-04-08 12 -169 val_169 2008-04-08 12 -169 val_169 2008-04-08 12 -169 val_169 2008-04-09 11 -169 val_169 2008-04-09 11 -169 val_169 2008-04-09 11 -169 val_169 2008-04-09 11 -169 val_169 2008-04-09 12 -169 val_169 2008-04-09 12 -169 val_169 2008-04-09 12 -169 val_169 2008-04-09 12 -17 val_17 2008-04-08 11 -17 val_17 2008-04-08 12 -17 val_17 2008-04-09 11 -17 val_17 2008-04-09 12 -170 val_170 2008-04-08 11 -170 val_170 2008-04-08 12 -170 val_170 2008-04-09 11 -170 val_170 2008-04-09 12 -172 val_172 2008-04-08 11 -172 val_172 2008-04-08 11 -172 val_172 2008-04-08 12 -172 val_172 2008-04-08 12 -172 val_172 2008-04-09 11 -172 val_172 2008-04-09 11 -172 val_172 2008-04-09 12 -172 val_172 2008-04-09 12 -174 val_174 2008-04-08 11 -174 val_174 2008-04-08 11 -174 val_174 2008-04-08 12 -174 val_174 2008-04-08 12 -174 val_174 2008-04-09 11 -174 val_174 2008-04-09 11 -174 val_174 2008-04-09 12 -174 val_174 2008-04-09 12 -175 val_175 2008-04-08 11 -175 val_175 2008-04-08 11 -175 val_175 2008-04-08 12 -175 val_175 2008-04-08 12 -175 val_175 2008-04-09 11 -175 val_175 2008-04-09 11 -175 val_175 2008-04-09 12 -175 val_175 2008-04-09 12 -176 val_176 2008-04-08 11 -176 val_176 2008-04-08 11 -176 val_176 2008-04-08 12 -176 val_176 2008-04-08 12 -176 val_176 2008-04-09 11 -176 val_176 2008-04-09 11 -176 val_176 2008-04-09 12 -176 val_176 2008-04-09 12 -177 val_177 2008-04-08 11 -177 val_177 2008-04-08 12 -177 val_177 2008-04-09 11 -177 val_177 2008-04-09 12 -178 val_178 2008-04-08 11 -178 val_178 2008-04-08 12 -178 val_178 2008-04-09 11 -178 val_178 2008-04-09 12 -179 val_179 2008-04-08 11 -179 val_179 2008-04-08 11 -179 val_179 2008-04-08 12 -179 val_179 2008-04-08 12 -179 val_179 2008-04-09 11 -179 val_179 2008-04-09 11 -179 val_179 2008-04-09 12 -179 val_179 2008-04-09 12 -18 val_18 2008-04-08 11 -18 val_18 2008-04-08 11 -18 val_18 2008-04-08 12 -18 val_18 2008-04-08 12 -18 val_18 2008-04-09 11 -18 val_18 2008-04-09 11 -18 val_18 2008-04-09 12 -18 val_18 2008-04-09 12 -180 val_180 2008-04-08 11 -180 val_180 2008-04-08 12 -180 val_180 2008-04-09 11 -180 val_180 2008-04-09 12 -181 val_181 2008-04-08 11 -181 val_181 2008-04-08 12 -181 val_181 2008-04-09 11 -181 val_181 2008-04-09 12 -183 val_183 2008-04-08 11 -183 val_183 2008-04-08 12 -183 val_183 2008-04-09 11 -183 val_183 2008-04-09 12 -186 val_186 2008-04-08 11 -186 val_186 2008-04-08 12 -186 val_186 2008-04-09 11 -186 val_186 2008-04-09 12 -187 val_187 2008-04-08 11 -187 val_187 
2008-04-08 11 -187 val_187 2008-04-08 11 -187 val_187 2008-04-08 12 -187 val_187 2008-04-08 12 -187 val_187 2008-04-08 12 -187 val_187 2008-04-09 11 -187 val_187 2008-04-09 11 -187 val_187 2008-04-09 11 -187 val_187 2008-04-09 12 -187 val_187 2008-04-09 12 -187 val_187 2008-04-09 12 -189 val_189 2008-04-08 11 -189 val_189 2008-04-08 12 -189 val_189 2008-04-09 11 -189 val_189 2008-04-09 12 -19 val_19 2008-04-08 11 -19 val_19 2008-04-08 12 -19 val_19 2008-04-09 11 -19 val_19 2008-04-09 12 -190 val_190 2008-04-08 11 -190 val_190 2008-04-08 12 -190 val_190 2008-04-09 11 -190 val_190 2008-04-09 12 -191 val_191 2008-04-08 11 -191 val_191 2008-04-08 11 -191 val_191 2008-04-08 12 -191 val_191 2008-04-08 12 -191 val_191 2008-04-09 11 -191 val_191 2008-04-09 11 -191 val_191 2008-04-09 12 -191 val_191 2008-04-09 12 -192 val_192 2008-04-08 11 -192 val_192 2008-04-08 12 -192 val_192 2008-04-09 11 -192 val_192 2008-04-09 12 -193 val_193 2008-04-08 11 -193 val_193 2008-04-08 11 -193 val_193 2008-04-08 11 -193 val_193 2008-04-08 12 -193 val_193 2008-04-08 12 -193 val_193 2008-04-08 12 -193 val_193 2008-04-09 11 -193 val_193 2008-04-09 11 -193 val_193 2008-04-09 11 -193 val_193 2008-04-09 12 -193 val_193 2008-04-09 12 -193 val_193 2008-04-09 12 -194 val_194 2008-04-08 11 -194 val_194 2008-04-08 12 -194 val_194 2008-04-09 11 -194 val_194 2008-04-09 12 -195 val_195 2008-04-08 11 -195 val_195 2008-04-08 11 -195 val_195 2008-04-08 12 -195 val_195 2008-04-08 12 -195 val_195 2008-04-09 11 -195 val_195 2008-04-09 11 -195 val_195 2008-04-09 12 -195 val_195 2008-04-09 12 -196 val_196 2008-04-08 11 -196 val_196 2008-04-08 12 -196 val_196 2008-04-09 11 -196 val_196 2008-04-09 12 -197 val_197 2008-04-08 11 -197 val_197 2008-04-08 11 -197 val_197 2008-04-08 12 -197 val_197 2008-04-08 12 -197 val_197 2008-04-09 11 -197 val_197 2008-04-09 11 -197 val_197 2008-04-09 12 -197 val_197 2008-04-09 12 -199 val_199 2008-04-08 11 -199 val_199 2008-04-08 11 -199 val_199 2008-04-08 11 -199 val_199 2008-04-08 12 -199 val_199 2008-04-08 12 -199 val_199 2008-04-08 12 -199 val_199 2008-04-09 11 -199 val_199 2008-04-09 11 -199 val_199 2008-04-09 11 -199 val_199 2008-04-09 12 -199 val_199 2008-04-09 12 -199 val_199 2008-04-09 12 -2 val_2 2008-04-08 11 -2 val_2 2008-04-08 12 -2 val_2 2008-04-09 11 -2 val_2 2008-04-09 12 -20 val_20 2008-04-08 11 -20 val_20 2008-04-08 12 -20 val_20 2008-04-09 11 -20 val_20 2008-04-09 12 -200 val_200 2008-04-08 11 -200 val_200 2008-04-08 11 -200 val_200 2008-04-08 12 -200 val_200 2008-04-08 12 -200 val_200 2008-04-09 11 -200 val_200 2008-04-09 11 -200 val_200 2008-04-09 12 -200 val_200 2008-04-09 12 -201 val_201 2008-04-08 11 -201 val_201 2008-04-08 12 -201 val_201 2008-04-09 11 -201 val_201 2008-04-09 12 -202 val_202 2008-04-08 11 -202 val_202 2008-04-08 12 -202 val_202 2008-04-09 11 -202 val_202 2008-04-09 12 -203 val_203 2008-04-08 11 -203 val_203 2008-04-08 11 -203 val_203 2008-04-08 12 -203 val_203 2008-04-08 12 -203 val_203 2008-04-09 11 -203 val_203 2008-04-09 11 -203 val_203 2008-04-09 12 -203 val_203 2008-04-09 12 -205 val_205 2008-04-08 11 -205 val_205 2008-04-08 11 -205 val_205 2008-04-08 12 -205 val_205 2008-04-08 12 -205 val_205 2008-04-09 11 -205 val_205 2008-04-09 11 -205 val_205 2008-04-09 12 -205 val_205 2008-04-09 12 -207 val_207 2008-04-08 11 -207 val_207 2008-04-08 11 -207 val_207 2008-04-08 12 -207 val_207 2008-04-08 12 -207 val_207 2008-04-09 11 -207 val_207 2008-04-09 11 -207 val_207 2008-04-09 12 -207 val_207 2008-04-09 12 -208 val_208 2008-04-08 11 -208 val_208 2008-04-08 11 -208 
val_208 2008-04-08 11 -208 val_208 2008-04-08 12 -208 val_208 2008-04-08 12 -208 val_208 2008-04-08 12 -208 val_208 2008-04-09 11 -208 val_208 2008-04-09 11 -208 val_208 2008-04-09 11 -208 val_208 2008-04-09 12 -208 val_208 2008-04-09 12 -208 val_208 2008-04-09 12 -209 val_209 2008-04-08 11 -209 val_209 2008-04-08 11 -209 val_209 2008-04-08 12 -209 val_209 2008-04-08 12 -209 val_209 2008-04-09 11 -209 val_209 2008-04-09 11 -209 val_209 2008-04-09 12 -209 val_209 2008-04-09 12 -213 val_213 2008-04-08 11 -213 val_213 2008-04-08 11 -213 val_213 2008-04-08 12 -213 val_213 2008-04-08 12 -213 val_213 2008-04-09 11 -213 val_213 2008-04-09 11 -213 val_213 2008-04-09 12 -213 val_213 2008-04-09 12 -214 val_214 2008-04-08 11 -214 val_214 2008-04-08 12 -214 val_214 2008-04-09 11 -214 val_214 2008-04-09 12 -216 val_216 2008-04-08 11 -216 val_216 2008-04-08 11 -216 val_216 2008-04-08 12 -216 val_216 2008-04-08 12 -216 val_216 2008-04-09 11 -216 val_216 2008-04-09 11 -216 val_216 2008-04-09 12 -216 val_216 2008-04-09 12 -217 val_217 2008-04-08 11 -217 val_217 2008-04-08 11 -217 val_217 2008-04-08 12 -217 val_217 2008-04-08 12 -217 val_217 2008-04-09 11 -217 val_217 2008-04-09 11 -217 val_217 2008-04-09 12 -217 val_217 2008-04-09 12 -218 val_218 2008-04-08 11 -218 val_218 2008-04-08 12 -218 val_218 2008-04-09 11 -218 val_218 2008-04-09 12 -219 val_219 2008-04-08 11 -219 val_219 2008-04-08 11 -219 val_219 2008-04-08 12 -219 val_219 2008-04-08 12 -219 val_219 2008-04-09 11 -219 val_219 2008-04-09 11 -219 val_219 2008-04-09 12 -219 val_219 2008-04-09 12 -221 val_221 2008-04-08 11 -221 val_221 2008-04-08 11 -221 val_221 2008-04-08 12 -221 val_221 2008-04-08 12 -221 val_221 2008-04-09 11 -221 val_221 2008-04-09 11 -221 val_221 2008-04-09 12 -221 val_221 2008-04-09 12 -222 val_222 2008-04-08 11 -222 val_222 2008-04-08 12 -222 val_222 2008-04-09 11 -222 val_222 2008-04-09 12 -223 val_223 2008-04-08 11 -223 val_223 2008-04-08 11 -223 val_223 2008-04-08 12 -223 val_223 2008-04-08 12 -223 val_223 2008-04-09 11 -223 val_223 2008-04-09 11 -223 val_223 2008-04-09 12 -223 val_223 2008-04-09 12 -224 val_224 2008-04-08 11 -224 val_224 2008-04-08 11 -224 val_224 2008-04-08 12 -224 val_224 2008-04-08 12 -224 val_224 2008-04-09 11 -224 val_224 2008-04-09 11 -224 val_224 2008-04-09 12 -224 val_224 2008-04-09 12 -226 val_226 2008-04-08 11 -226 val_226 2008-04-08 12 -226 val_226 2008-04-09 11 -226 val_226 2008-04-09 12 -228 val_228 2008-04-08 11 -228 val_228 2008-04-08 12 -228 val_228 2008-04-09 11 -228 val_228 2008-04-09 12 -229 val_229 2008-04-08 11 -229 val_229 2008-04-08 11 -229 val_229 2008-04-08 12 -229 val_229 2008-04-08 12 -229 val_229 2008-04-09 11 -229 val_229 2008-04-09 11 -229 val_229 2008-04-09 12 -229 val_229 2008-04-09 12 -230 val_230 2008-04-08 11 -230 val_230 2008-04-08 11 -230 val_230 2008-04-08 11 -230 val_230 2008-04-08 11 -230 val_230 2008-04-08 11 -230 val_230 2008-04-08 12 -230 val_230 2008-04-08 12 -230 val_230 2008-04-08 12 -230 val_230 2008-04-08 12 -230 val_230 2008-04-08 12 -230 val_230 2008-04-09 11 -230 val_230 2008-04-09 11 -230 val_230 2008-04-09 11 -230 val_230 2008-04-09 11 -230 val_230 2008-04-09 11 -230 val_230 2008-04-09 12 -230 val_230 2008-04-09 12 -230 val_230 2008-04-09 12 -230 val_230 2008-04-09 12 -230 val_230 2008-04-09 12 -233 val_233 2008-04-08 11 -233 val_233 2008-04-08 11 -233 val_233 2008-04-08 12 -233 val_233 2008-04-08 12 -233 val_233 2008-04-09 11 -233 val_233 2008-04-09 11 -233 val_233 2008-04-09 12 -233 val_233 2008-04-09 12 -235 val_235 2008-04-08 11 -235 val_235 
2008-04-08 12 -235 val_235 2008-04-09 11 -235 val_235 2008-04-09 12 -237 val_237 2008-04-08 11 -237 val_237 2008-04-08 11 -237 val_237 2008-04-08 12 -237 val_237 2008-04-08 12 -237 val_237 2008-04-09 11 -237 val_237 2008-04-09 11 -237 val_237 2008-04-09 12 -237 val_237 2008-04-09 12 -238 val_238 2008-04-08 11 -238 val_238 2008-04-08 11 -238 val_238 2008-04-08 12 -238 val_238 2008-04-08 12 -238 val_238 2008-04-09 11 -238 val_238 2008-04-09 11 -238 val_238 2008-04-09 12 -238 val_238 2008-04-09 12 -239 val_239 2008-04-08 11 -239 val_239 2008-04-08 11 -239 val_239 2008-04-08 12 -239 val_239 2008-04-08 12 -239 val_239 2008-04-09 11 -239 val_239 2008-04-09 11 -239 val_239 2008-04-09 12 -239 val_239 2008-04-09 12 -24 val_24 2008-04-08 11 -24 val_24 2008-04-08 11 -24 val_24 2008-04-08 12 -24 val_24 2008-04-08 12 -24 val_24 2008-04-09 11 -24 val_24 2008-04-09 11 -24 val_24 2008-04-09 12 -24 val_24 2008-04-09 12 -241 val_241 2008-04-08 11 -241 val_241 2008-04-08 12 -241 val_241 2008-04-09 11 -241 val_241 2008-04-09 12 -242 val_242 2008-04-08 11 -242 val_242 2008-04-08 11 -242 val_242 2008-04-08 12 -242 val_242 2008-04-08 12 -242 val_242 2008-04-09 11 -242 val_242 2008-04-09 11 -242 val_242 2008-04-09 12 -242 val_242 2008-04-09 12 -244 val_244 2008-04-08 11 -244 val_244 2008-04-08 12 -244 val_244 2008-04-09 11 -244 val_244 2008-04-09 12 -247 val_247 2008-04-08 11 -247 val_247 2008-04-08 12 -247 val_247 2008-04-09 11 -247 val_247 2008-04-09 12 -248 val_248 2008-04-08 11 -248 val_248 2008-04-08 12 -248 val_248 2008-04-09 11 -248 val_248 2008-04-09 12 -249 val_249 2008-04-08 11 -249 val_249 2008-04-08 12 -249 val_249 2008-04-09 11 -249 val_249 2008-04-09 12 -252 val_252 2008-04-08 11 -252 val_252 2008-04-08 12 -252 val_252 2008-04-09 11 -252 val_252 2008-04-09 12 -255 val_255 2008-04-08 11 -255 val_255 2008-04-08 11 -255 val_255 2008-04-08 12 -255 val_255 2008-04-08 12 -255 val_255 2008-04-09 11 -255 val_255 2008-04-09 11 -255 val_255 2008-04-09 12 -255 val_255 2008-04-09 12 -256 val_256 2008-04-08 11 -256 val_256 2008-04-08 11 -256 val_256 2008-04-08 12 -256 val_256 2008-04-08 12 -256 val_256 2008-04-09 11 -256 val_256 2008-04-09 11 -256 val_256 2008-04-09 12 -256 val_256 2008-04-09 12 -257 val_257 2008-04-08 11 -257 val_257 2008-04-08 12 -257 val_257 2008-04-09 11 -257 val_257 2008-04-09 12 -258 val_258 2008-04-08 11 -258 val_258 2008-04-08 12 -258 val_258 2008-04-09 11 -258 val_258 2008-04-09 12 -26 val_26 2008-04-08 11 -26 val_26 2008-04-08 11 -26 val_26 2008-04-08 12 -26 val_26 2008-04-08 12 -26 val_26 2008-04-09 11 -26 val_26 2008-04-09 11 -26 val_26 2008-04-09 12 -26 val_26 2008-04-09 12 -260 val_260 2008-04-08 11 -260 val_260 2008-04-08 12 -260 val_260 2008-04-09 11 -260 val_260 2008-04-09 12 -262 val_262 2008-04-08 11 -262 val_262 2008-04-08 12 -262 val_262 2008-04-09 11 -262 val_262 2008-04-09 12 -263 val_263 2008-04-08 11 -263 val_263 2008-04-08 12 -263 val_263 2008-04-09 11 -263 val_263 2008-04-09 12 -265 val_265 2008-04-08 11 -265 val_265 2008-04-08 11 -265 val_265 2008-04-08 12 -265 val_265 2008-04-08 12 -265 val_265 2008-04-09 11 -265 val_265 2008-04-09 11 -265 val_265 2008-04-09 12 -265 val_265 2008-04-09 12 -266 val_266 2008-04-08 11 -266 val_266 2008-04-08 12 -266 val_266 2008-04-09 11 -266 val_266 2008-04-09 12 -27 val_27 2008-04-08 11 -27 val_27 2008-04-08 12 -27 val_27 2008-04-09 11 -27 val_27 2008-04-09 12 -272 val_272 2008-04-08 11 -272 val_272 2008-04-08 11 -272 val_272 2008-04-08 12 -272 val_272 2008-04-08 12 -272 val_272 2008-04-09 11 -272 val_272 2008-04-09 11 -272 val_272 
2008-04-09 12 -272 val_272 2008-04-09 12 -273 val_273 2008-04-08 11 -273 val_273 2008-04-08 11 -273 val_273 2008-04-08 11 -273 val_273 2008-04-08 12 -273 val_273 2008-04-08 12 -273 val_273 2008-04-08 12 -273 val_273 2008-04-09 11 -273 val_273 2008-04-09 11 -273 val_273 2008-04-09 11 -273 val_273 2008-04-09 12 -273 val_273 2008-04-09 12 -273 val_273 2008-04-09 12 -274 val_274 2008-04-08 11 -274 val_274 2008-04-08 12 -274 val_274 2008-04-09 11 -274 val_274 2008-04-09 12 -275 val_275 2008-04-08 11 -275 val_275 2008-04-08 12 -275 val_275 2008-04-09 11 -275 val_275 2008-04-09 12 -277 val_277 2008-04-08 11 -277 val_277 2008-04-08 11 -277 val_277 2008-04-08 11 -277 val_277 2008-04-08 11 -277 val_277 2008-04-08 12 -277 val_277 2008-04-08 12 -277 val_277 2008-04-08 12 -277 val_277 2008-04-08 12 -277 val_277 2008-04-09 11 -277 val_277 2008-04-09 11 -277 val_277 2008-04-09 11 -277 val_277 2008-04-09 11 -277 val_277 2008-04-09 12 -277 val_277 2008-04-09 12 -277 val_277 2008-04-09 12 -277 val_277 2008-04-09 12 -278 val_278 2008-04-08 11 -278 val_278 2008-04-08 11 -278 val_278 2008-04-08 12 -278 val_278 2008-04-08 12 -278 val_278 2008-04-09 11 -278 val_278 2008-04-09 11 -278 val_278 2008-04-09 12 -278 val_278 2008-04-09 12 -28 val_28 2008-04-08 11 -28 val_28 2008-04-08 12 -28 val_28 2008-04-09 11 -28 val_28 2008-04-09 12 -280 val_280 2008-04-08 11 -280 val_280 2008-04-08 11 -280 val_280 2008-04-08 12 -280 val_280 2008-04-08 12 -280 val_280 2008-04-09 11 -280 val_280 2008-04-09 11 -280 val_280 2008-04-09 12 -280 val_280 2008-04-09 12 -281 val_281 2008-04-08 11 -281 val_281 2008-04-08 11 -281 val_281 2008-04-08 12 -281 val_281 2008-04-08 12 -281 val_281 2008-04-09 11 -281 val_281 2008-04-09 11 -281 val_281 2008-04-09 12 -281 val_281 2008-04-09 12 -282 val_282 2008-04-08 11 -282 val_282 2008-04-08 11 -282 val_282 2008-04-08 12 -282 val_282 2008-04-08 12 -282 val_282 2008-04-09 11 -282 val_282 2008-04-09 11 -282 val_282 2008-04-09 12 -282 val_282 2008-04-09 12 -283 val_283 2008-04-08 11 -283 val_283 2008-04-08 12 -283 val_283 2008-04-09 11 -283 val_283 2008-04-09 12 -284 val_284 2008-04-08 11 -284 val_284 2008-04-08 12 -284 val_284 2008-04-09 11 -284 val_284 2008-04-09 12 -285 val_285 2008-04-08 11 -285 val_285 2008-04-08 12 -285 val_285 2008-04-09 11 -285 val_285 2008-04-09 12 -286 val_286 2008-04-08 11 -286 val_286 2008-04-08 12 -286 val_286 2008-04-09 11 -286 val_286 2008-04-09 12 -287 val_287 2008-04-08 11 -287 val_287 2008-04-08 12 -287 val_287 2008-04-09 11 -287 val_287 2008-04-09 12 -288 val_288 2008-04-08 11 -288 val_288 2008-04-08 11 -288 val_288 2008-04-08 12 -288 val_288 2008-04-08 12 -288 val_288 2008-04-09 11 -288 val_288 2008-04-09 11 -288 val_288 2008-04-09 12 -288 val_288 2008-04-09 12 -289 val_289 2008-04-08 11 -289 val_289 2008-04-08 12 -289 val_289 2008-04-09 11 -289 val_289 2008-04-09 12 -291 val_291 2008-04-08 11 -291 val_291 2008-04-08 12 -291 val_291 2008-04-09 11 -291 val_291 2008-04-09 12 -292 val_292 2008-04-08 11 -292 val_292 2008-04-08 12 -292 val_292 2008-04-09 11 -292 val_292 2008-04-09 12 -296 val_296 2008-04-08 11 -296 val_296 2008-04-08 12 -296 val_296 2008-04-09 11 -296 val_296 2008-04-09 12 -298 val_298 2008-04-08 11 -298 val_298 2008-04-08 11 -298 val_298 2008-04-08 11 -298 val_298 2008-04-08 12 -298 val_298 2008-04-08 12 -298 val_298 2008-04-08 12 -298 val_298 2008-04-09 11 -298 val_298 2008-04-09 11 -298 val_298 2008-04-09 11 -298 val_298 2008-04-09 12 -298 val_298 2008-04-09 12 -298 val_298 2008-04-09 12 -30 val_30 2008-04-08 11 -30 val_30 2008-04-08 12 -30 val_30 
2008-04-09 11 -30 val_30 2008-04-09 12 -302 val_302 2008-04-08 11 -302 val_302 2008-04-08 12 -302 val_302 2008-04-09 11 -302 val_302 2008-04-09 12 -305 val_305 2008-04-08 11 -305 val_305 2008-04-08 12 -305 val_305 2008-04-09 11 -305 val_305 2008-04-09 12 -306 val_306 2008-04-08 11 -306 val_306 2008-04-08 12 -306 val_306 2008-04-09 11 -306 val_306 2008-04-09 12 -307 val_307 2008-04-08 11 -307 val_307 2008-04-08 11 -307 val_307 2008-04-08 12 -307 val_307 2008-04-08 12 -307 val_307 2008-04-09 11 -307 val_307 2008-04-09 11 -307 val_307 2008-04-09 12 -307 val_307 2008-04-09 12 -308 val_308 2008-04-08 11 -308 val_308 2008-04-08 12 -308 val_308 2008-04-09 11 -308 val_308 2008-04-09 12 -309 val_309 2008-04-08 11 -309 val_309 2008-04-08 11 -309 val_309 2008-04-08 12 -309 val_309 2008-04-08 12 -309 val_309 2008-04-09 11 -309 val_309 2008-04-09 11 -309 val_309 2008-04-09 12 -309 val_309 2008-04-09 12 -310 val_310 2008-04-08 11 -310 val_310 2008-04-08 12 -310 val_310 2008-04-09 11 -310 val_310 2008-04-09 12 -311 val_311 2008-04-08 11 -311 val_311 2008-04-08 11 -311 val_311 2008-04-08 11 -311 val_311 2008-04-08 12 -311 val_311 2008-04-08 12 -311 val_311 2008-04-08 12 -311 val_311 2008-04-09 11 -311 val_311 2008-04-09 11 -311 val_311 2008-04-09 11 -311 val_311 2008-04-09 12 -311 val_311 2008-04-09 12 -311 val_311 2008-04-09 12 -315 val_315 2008-04-08 11 -315 val_315 2008-04-08 12 -315 val_315 2008-04-09 11 -315 val_315 2008-04-09 12 -316 val_316 2008-04-08 11 -316 val_316 2008-04-08 11 -316 val_316 2008-04-08 11 -316 val_316 2008-04-08 12 -316 val_316 2008-04-08 12 -316 val_316 2008-04-08 12 -316 val_316 2008-04-09 11 -316 val_316 2008-04-09 11 -316 val_316 2008-04-09 11 -316 val_316 2008-04-09 12 -316 val_316 2008-04-09 12 -316 val_316 2008-04-09 12 -317 val_317 2008-04-08 11 -317 val_317 2008-04-08 11 -317 val_317 2008-04-08 12 -317 val_317 2008-04-08 12 -317 val_317 2008-04-09 11 -317 val_317 2008-04-09 11 -317 val_317 2008-04-09 12 -317 val_317 2008-04-09 12 -318 val_318 2008-04-08 11 -318 val_318 2008-04-08 11 -318 val_318 2008-04-08 11 -318 val_318 2008-04-08 12 -318 val_318 2008-04-08 12 -318 val_318 2008-04-08 12 -318 val_318 2008-04-09 11 -318 val_318 2008-04-09 11 -318 val_318 2008-04-09 11 -318 val_318 2008-04-09 12 -318 val_318 2008-04-09 12 -318 val_318 2008-04-09 12 -321 val_321 2008-04-08 11 -321 val_321 2008-04-08 11 -321 val_321 2008-04-08 12 -321 val_321 2008-04-08 12 -321 val_321 2008-04-09 11 -321 val_321 2008-04-09 11 -321 val_321 2008-04-09 12 -321 val_321 2008-04-09 12 -322 val_322 2008-04-08 11 -322 val_322 2008-04-08 11 -322 val_322 2008-04-08 12 -322 val_322 2008-04-08 12 -322 val_322 2008-04-09 11 -322 val_322 2008-04-09 11 -322 val_322 2008-04-09 12 -322 val_322 2008-04-09 12 -323 val_323 2008-04-08 11 -323 val_323 2008-04-08 12 -323 val_323 2008-04-09 11 -323 val_323 2008-04-09 12 -325 val_325 2008-04-08 11 -325 val_325 2008-04-08 11 -325 val_325 2008-04-08 12 -325 val_325 2008-04-08 12 -325 val_325 2008-04-09 11 -325 val_325 2008-04-09 11 -325 val_325 2008-04-09 12 -325 val_325 2008-04-09 12 -327 val_327 2008-04-08 11 -327 val_327 2008-04-08 11 -327 val_327 2008-04-08 11 -327 val_327 2008-04-08 12 -327 val_327 2008-04-08 12 -327 val_327 2008-04-08 12 -327 val_327 2008-04-09 11 -327 val_327 2008-04-09 11 -327 val_327 2008-04-09 11 -327 val_327 2008-04-09 12 -327 val_327 2008-04-09 12 -327 val_327 2008-04-09 12 -33 val_33 2008-04-08 11 -33 val_33 2008-04-08 12 -33 val_33 2008-04-09 11 -33 val_33 2008-04-09 12 -331 val_331 2008-04-08 11 -331 val_331 2008-04-08 11 -331 val_331 
2008-04-08 12 -331 val_331 2008-04-08 12 -331 val_331 2008-04-09 11 -331 val_331 2008-04-09 11 -331 val_331 2008-04-09 12 -331 val_331 2008-04-09 12 -332 val_332 2008-04-08 11 -332 val_332 2008-04-08 12 -332 val_332 2008-04-09 11 -332 val_332 2008-04-09 12 -333 val_333 2008-04-08 11 -333 val_333 2008-04-08 11 -333 val_333 2008-04-08 12 -333 val_333 2008-04-08 12 -333 val_333 2008-04-09 11 -333 val_333 2008-04-09 11 -333 val_333 2008-04-09 12 -333 val_333 2008-04-09 12 -335 val_335 2008-04-08 11 -335 val_335 2008-04-08 12 -335 val_335 2008-04-09 11 -335 val_335 2008-04-09 12 -336 val_336 2008-04-08 11 -336 val_336 2008-04-08 12 -336 val_336 2008-04-09 11 -336 val_336 2008-04-09 12 -338 val_338 2008-04-08 11 -338 val_338 2008-04-08 12 -338 val_338 2008-04-09 11 -338 val_338 2008-04-09 12 -339 val_339 2008-04-08 11 -339 val_339 2008-04-08 12 -339 val_339 2008-04-09 11 -339 val_339 2008-04-09 12 -34 val_34 2008-04-08 11 -34 val_34 2008-04-08 12 -34 val_34 2008-04-09 11 -34 val_34 2008-04-09 12 -341 val_341 2008-04-08 11 -341 val_341 2008-04-08 12 -341 val_341 2008-04-09 11 -341 val_341 2008-04-09 12 -342 val_342 2008-04-08 11 -342 val_342 2008-04-08 11 -342 val_342 2008-04-08 12 -342 val_342 2008-04-08 12 -342 val_342 2008-04-09 11 -342 val_342 2008-04-09 11 -342 val_342 2008-04-09 12 -342 val_342 2008-04-09 12 -344 val_344 2008-04-08 11 -344 val_344 2008-04-08 11 -344 val_344 2008-04-08 12 -344 val_344 2008-04-08 12 -344 val_344 2008-04-09 11 -344 val_344 2008-04-09 11 -344 val_344 2008-04-09 12 -344 val_344 2008-04-09 12 -345 val_345 2008-04-08 11 -345 val_345 2008-04-08 12 -345 val_345 2008-04-09 11 -345 val_345 2008-04-09 12 -348 val_348 2008-04-08 11 -348 val_348 2008-04-08 11 -348 val_348 2008-04-08 11 -348 val_348 2008-04-08 11 -348 val_348 2008-04-08 11 -348 val_348 2008-04-08 12 -348 val_348 2008-04-08 12 -348 val_348 2008-04-08 12 -348 val_348 2008-04-08 12 -348 val_348 2008-04-08 12 -348 val_348 2008-04-09 11 -348 val_348 2008-04-09 11 -348 val_348 2008-04-09 11 -348 val_348 2008-04-09 11 -348 val_348 2008-04-09 11 -348 val_348 2008-04-09 12 -348 val_348 2008-04-09 12 -348 val_348 2008-04-09 12 -348 val_348 2008-04-09 12 -348 val_348 2008-04-09 12 -35 val_35 2008-04-08 11 -35 val_35 2008-04-08 11 -35 val_35 2008-04-08 11 -35 val_35 2008-04-08 12 -35 val_35 2008-04-08 12 -35 val_35 2008-04-08 12 -35 val_35 2008-04-09 11 -35 val_35 2008-04-09 11 -35 val_35 2008-04-09 11 -35 val_35 2008-04-09 12 -35 val_35 2008-04-09 12 -35 val_35 2008-04-09 12 -351 val_351 2008-04-08 11 -351 val_351 2008-04-08 12 -351 val_351 2008-04-09 11 -351 val_351 2008-04-09 12 -353 val_353 2008-04-08 11 -353 val_353 2008-04-08 11 -353 val_353 2008-04-08 12 -353 val_353 2008-04-08 12 -353 val_353 2008-04-09 11 -353 val_353 2008-04-09 11 -353 val_353 2008-04-09 12 -353 val_353 2008-04-09 12 -356 val_356 2008-04-08 11 -356 val_356 2008-04-08 12 -356 val_356 2008-04-09 11 -356 val_356 2008-04-09 12 -360 val_360 2008-04-08 11 -360 val_360 2008-04-08 12 -360 val_360 2008-04-09 11 -360 val_360 2008-04-09 12 -362 val_362 2008-04-08 11 -362 val_362 2008-04-08 12 -362 val_362 2008-04-09 11 -362 val_362 2008-04-09 12 -364 val_364 2008-04-08 11 -364 val_364 2008-04-08 12 -364 val_364 2008-04-09 11 -364 val_364 2008-04-09 12 -365 val_365 2008-04-08 11 -365 val_365 2008-04-08 12 -365 val_365 2008-04-09 11 -365 val_365 2008-04-09 12 -366 val_366 2008-04-08 11 -366 val_366 2008-04-08 12 -366 val_366 2008-04-09 11 -366 val_366 2008-04-09 12 -367 val_367 2008-04-08 11 -367 val_367 2008-04-08 11 -367 val_367 2008-04-08 12 -367 
val_367 2008-04-08 12 -367 val_367 2008-04-09 11 -367 val_367 2008-04-09 11 -367 val_367 2008-04-09 12 -367 val_367 2008-04-09 12 -368 val_368 2008-04-08 11 -368 val_368 2008-04-08 12 -368 val_368 2008-04-09 11 -368 val_368 2008-04-09 12 -369 val_369 2008-04-08 11 -369 val_369 2008-04-08 11 -369 val_369 2008-04-08 11 -369 val_369 2008-04-08 12 -369 val_369 2008-04-08 12 -369 val_369 2008-04-08 12 -369 val_369 2008-04-09 11 -369 val_369 2008-04-09 11 -369 val_369 2008-04-09 11 -369 val_369 2008-04-09 12 -369 val_369 2008-04-09 12 -369 val_369 2008-04-09 12 -37 val_37 2008-04-08 11 -37 val_37 2008-04-08 11 -37 val_37 2008-04-08 12 -37 val_37 2008-04-08 12 -37 val_37 2008-04-09 11 -37 val_37 2008-04-09 11 -37 val_37 2008-04-09 12 -37 val_37 2008-04-09 12 -373 val_373 2008-04-08 11 -373 val_373 2008-04-08 12 -373 val_373 2008-04-09 11 -373 val_373 2008-04-09 12 -374 val_374 2008-04-08 11 -374 val_374 2008-04-08 12 -374 val_374 2008-04-09 11 -374 val_374 2008-04-09 12 -375 val_375 2008-04-08 11 -375 val_375 2008-04-08 12 -375 val_375 2008-04-09 11 -375 val_375 2008-04-09 12 -377 val_377 2008-04-08 11 -377 val_377 2008-04-08 12 -377 val_377 2008-04-09 11 -377 val_377 2008-04-09 12 -378 val_378 2008-04-08 11 -378 val_378 2008-04-08 12 -378 val_378 2008-04-09 11 -378 val_378 2008-04-09 12 -379 val_379 2008-04-08 11 -379 val_379 2008-04-08 12 -379 val_379 2008-04-09 11 -379 val_379 2008-04-09 12 -382 val_382 2008-04-08 11 -382 val_382 2008-04-08 11 -382 val_382 2008-04-08 12 -382 val_382 2008-04-08 12 -382 val_382 2008-04-09 11 -382 val_382 2008-04-09 11 -382 val_382 2008-04-09 12 -382 val_382 2008-04-09 12 -384 val_384 2008-04-08 11 -384 val_384 2008-04-08 11 -384 val_384 2008-04-08 11 -384 val_384 2008-04-08 12 -384 val_384 2008-04-08 12 -384 val_384 2008-04-08 12 -384 val_384 2008-04-09 11 -384 val_384 2008-04-09 11 -384 val_384 2008-04-09 11 -384 val_384 2008-04-09 12 -384 val_384 2008-04-09 12 -384 val_384 2008-04-09 12 -386 val_386 2008-04-08 11 -386 val_386 2008-04-08 12 -386 val_386 2008-04-09 11 -386 val_386 2008-04-09 12 -389 val_389 2008-04-08 11 -389 val_389 2008-04-08 12 -389 val_389 2008-04-09 11 -389 val_389 2008-04-09 12 -392 val_392 2008-04-08 11 -392 val_392 2008-04-08 12 -392 val_392 2008-04-09 11 -392 val_392 2008-04-09 12 -393 val_393 2008-04-08 11 -393 val_393 2008-04-08 12 -393 val_393 2008-04-09 11 -393 val_393 2008-04-09 12 -394 val_394 2008-04-08 11 -394 val_394 2008-04-08 12 -394 val_394 2008-04-09 11 -394 val_394 2008-04-09 12 -395 val_395 2008-04-08 11 -395 val_395 2008-04-08 11 -395 val_395 2008-04-08 12 -395 val_395 2008-04-08 12 -395 val_395 2008-04-09 11 -395 val_395 2008-04-09 11 -395 val_395 2008-04-09 12 -395 val_395 2008-04-09 12 -396 val_396 2008-04-08 11 -396 val_396 2008-04-08 11 -396 val_396 2008-04-08 11 -396 val_396 2008-04-08 12 -396 val_396 2008-04-08 12 -396 val_396 2008-04-08 12 -396 val_396 2008-04-09 11 -396 val_396 2008-04-09 11 -396 val_396 2008-04-09 11 -396 val_396 2008-04-09 12 -396 val_396 2008-04-09 12 -396 val_396 2008-04-09 12 -397 val_397 2008-04-08 11 -397 val_397 2008-04-08 11 -397 val_397 2008-04-08 12 -397 val_397 2008-04-08 12 -397 val_397 2008-04-09 11 -397 val_397 2008-04-09 11 -397 val_397 2008-04-09 12 -397 val_397 2008-04-09 12 -399 val_399 2008-04-08 11 -399 val_399 2008-04-08 11 -399 val_399 2008-04-08 12 -399 val_399 2008-04-08 12 -399 val_399 2008-04-09 11 -399 val_399 2008-04-09 11 -399 val_399 2008-04-09 12 -399 val_399 2008-04-09 12 -4 val_4 2008-04-08 11 -4 val_4 2008-04-08 12 -4 val_4 2008-04-09 11 -4 val_4 2008-04-09 12 
-400 val_400 2008-04-08 11 -400 val_400 2008-04-08 12 -400 val_400 2008-04-09 11 -400 val_400 2008-04-09 12 -401 val_401 2008-04-08 11 -401 val_401 2008-04-08 11 -401 val_401 2008-04-08 11 -401 val_401 2008-04-08 11 -401 val_401 2008-04-08 11 -401 val_401 2008-04-08 12 -401 val_401 2008-04-08 12 -401 val_401 2008-04-08 12 -401 val_401 2008-04-08 12 -401 val_401 2008-04-08 12 -401 val_401 2008-04-09 11 -401 val_401 2008-04-09 11 -401 val_401 2008-04-09 11 -401 val_401 2008-04-09 11 -401 val_401 2008-04-09 11 -401 val_401 2008-04-09 12 -401 val_401 2008-04-09 12 -401 val_401 2008-04-09 12 -401 val_401 2008-04-09 12 -401 val_401 2008-04-09 12 -402 val_402 2008-04-08 11 -402 val_402 2008-04-08 12 -402 val_402 2008-04-09 11 -402 val_402 2008-04-09 12 -403 val_403 2008-04-08 11 -403 val_403 2008-04-08 11 -403 val_403 2008-04-08 11 -403 val_403 2008-04-08 12 -403 val_403 2008-04-08 12 -403 val_403 2008-04-08 12 -403 val_403 2008-04-09 11 -403 val_403 2008-04-09 11 -403 val_403 2008-04-09 11 -403 val_403 2008-04-09 12 -403 val_403 2008-04-09 12 -403 val_403 2008-04-09 12 -404 val_404 2008-04-08 11 -404 val_404 2008-04-08 11 -404 val_404 2008-04-08 12 -404 val_404 2008-04-08 12 -404 val_404 2008-04-09 11 -404 val_404 2008-04-09 11 -404 val_404 2008-04-09 12 -404 val_404 2008-04-09 12 -406 val_406 2008-04-08 11 -406 val_406 2008-04-08 11 -406 val_406 2008-04-08 11 -406 val_406 2008-04-08 11 -406 val_406 2008-04-08 12 -406 val_406 2008-04-08 12 -406 val_406 2008-04-08 12 -406 val_406 2008-04-08 12 -406 val_406 2008-04-09 11 -406 val_406 2008-04-09 11 -406 val_406 2008-04-09 11 -406 val_406 2008-04-09 11 -406 val_406 2008-04-09 12 -406 val_406 2008-04-09 12 -406 val_406 2008-04-09 12 -406 val_406 2008-04-09 12 -407 val_407 2008-04-08 11 -407 val_407 2008-04-08 12 -407 val_407 2008-04-09 11 -407 val_407 2008-04-09 12 -409 val_409 2008-04-08 11 -409 val_409 2008-04-08 11 -409 val_409 2008-04-08 11 -409 val_409 2008-04-08 12 -409 val_409 2008-04-08 12 -409 val_409 2008-04-08 12 -409 val_409 2008-04-09 11 -409 val_409 2008-04-09 11 -409 val_409 2008-04-09 11 -409 val_409 2008-04-09 12 -409 val_409 2008-04-09 12 -409 val_409 2008-04-09 12 -41 val_41 2008-04-08 11 -41 val_41 2008-04-08 12 -41 val_41 2008-04-09 11 -41 val_41 2008-04-09 12 -411 val_411 2008-04-08 11 -411 val_411 2008-04-08 12 -411 val_411 2008-04-09 11 -411 val_411 2008-04-09 12 -413 val_413 2008-04-08 11 -413 val_413 2008-04-08 11 -413 val_413 2008-04-08 12 -413 val_413 2008-04-08 12 -413 val_413 2008-04-09 11 -413 val_413 2008-04-09 11 -413 val_413 2008-04-09 12 -413 val_413 2008-04-09 12 -414 val_414 2008-04-08 11 -414 val_414 2008-04-08 11 -414 val_414 2008-04-08 12 -414 val_414 2008-04-08 12 -414 val_414 2008-04-09 11 -414 val_414 2008-04-09 11 -414 val_414 2008-04-09 12 -414 val_414 2008-04-09 12 -417 val_417 2008-04-08 11 -417 val_417 2008-04-08 11 -417 val_417 2008-04-08 11 -417 val_417 2008-04-08 12 -417 val_417 2008-04-08 12 -417 val_417 2008-04-08 12 -417 val_417 2008-04-09 11 -417 val_417 2008-04-09 11 -417 val_417 2008-04-09 11 -417 val_417 2008-04-09 12 -417 val_417 2008-04-09 12 -417 val_417 2008-04-09 12 -418 val_418 2008-04-08 11 -418 val_418 2008-04-08 12 -418 val_418 2008-04-09 11 -418 val_418 2008-04-09 12 -419 val_419 2008-04-08 11 -419 val_419 2008-04-08 12 -419 val_419 2008-04-09 11 -419 val_419 2008-04-09 12 -42 val_42 2008-04-08 11 -42 val_42 2008-04-08 11 -42 val_42 2008-04-08 12 -42 val_42 2008-04-08 12 -42 val_42 2008-04-09 11 -42 val_42 2008-04-09 11 -42 val_42 2008-04-09 12 -42 val_42 2008-04-09 12 -421 val_421 
2008-04-08 11 -421 val_421 2008-04-08 12 -421 val_421 2008-04-09 11 -421 val_421 2008-04-09 12 -424 val_424 2008-04-08 11 -424 val_424 2008-04-08 11 -424 val_424 2008-04-08 12 -424 val_424 2008-04-08 12 -424 val_424 2008-04-09 11 -424 val_424 2008-04-09 11 -424 val_424 2008-04-09 12 -424 val_424 2008-04-09 12 -427 val_427 2008-04-08 11 -427 val_427 2008-04-08 12 -427 val_427 2008-04-09 11 -427 val_427 2008-04-09 12 -429 val_429 2008-04-08 11 -429 val_429 2008-04-08 11 -429 val_429 2008-04-08 12 -429 val_429 2008-04-08 12 -429 val_429 2008-04-09 11 -429 val_429 2008-04-09 11 -429 val_429 2008-04-09 12 -429 val_429 2008-04-09 12 -43 val_43 2008-04-08 11 -43 val_43 2008-04-08 12 -43 val_43 2008-04-09 11 -43 val_43 2008-04-09 12 -430 val_430 2008-04-08 11 -430 val_430 2008-04-08 11 -430 val_430 2008-04-08 11 -430 val_430 2008-04-08 12 -430 val_430 2008-04-08 12 -430 val_430 2008-04-08 12 -430 val_430 2008-04-09 11 -430 val_430 2008-04-09 11 -430 val_430 2008-04-09 11 -430 val_430 2008-04-09 12 -430 val_430 2008-04-09 12 -430 val_430 2008-04-09 12 -431 val_431 2008-04-08 11 -431 val_431 2008-04-08 11 -431 val_431 2008-04-08 11 -431 val_431 2008-04-08 12 -431 val_431 2008-04-08 12 -431 val_431 2008-04-08 12 -431 val_431 2008-04-09 11 -431 val_431 2008-04-09 11 -431 val_431 2008-04-09 11 -431 val_431 2008-04-09 12 -431 val_431 2008-04-09 12 -431 val_431 2008-04-09 12 -432 val_432 2008-04-08 11 -432 val_432 2008-04-08 12 -432 val_432 2008-04-09 11 -432 val_432 2008-04-09 12 -435 val_435 2008-04-08 11 -435 val_435 2008-04-08 12 -435 val_435 2008-04-09 11 -435 val_435 2008-04-09 12 -436 val_436 2008-04-08 11 -436 val_436 2008-04-08 12 -436 val_436 2008-04-09 11 -436 val_436 2008-04-09 12 -437 val_437 2008-04-08 11 -437 val_437 2008-04-08 12 -437 val_437 2008-04-09 11 -437 val_437 2008-04-09 12 -438 val_438 2008-04-08 11 -438 val_438 2008-04-08 11 -438 val_438 2008-04-08 11 -438 val_438 2008-04-08 12 -438 val_438 2008-04-08 12 -438 val_438 2008-04-08 12 -438 val_438 2008-04-09 11 -438 val_438 2008-04-09 11 -438 val_438 2008-04-09 11 -438 val_438 2008-04-09 12 -438 val_438 2008-04-09 12 -438 val_438 2008-04-09 12 -439 val_439 2008-04-08 11 -439 val_439 2008-04-08 11 -439 val_439 2008-04-08 12 -439 val_439 2008-04-08 12 -439 val_439 2008-04-09 11 -439 val_439 2008-04-09 11 -439 val_439 2008-04-09 12 -439 val_439 2008-04-09 12 -44 val_44 2008-04-08 11 -44 val_44 2008-04-08 12 -44 val_44 2008-04-09 11 -44 val_44 2008-04-09 12 -443 val_443 2008-04-08 11 -443 val_443 2008-04-08 12 -443 val_443 2008-04-09 11 -443 val_443 2008-04-09 12 -444 val_444 2008-04-08 11 -444 val_444 2008-04-08 12 -444 val_444 2008-04-09 11 -444 val_444 2008-04-09 12 -446 val_446 2008-04-08 11 -446 val_446 2008-04-08 12 -446 val_446 2008-04-09 11 -446 val_446 2008-04-09 12 -448 val_448 2008-04-08 11 -448 val_448 2008-04-08 12 -448 val_448 2008-04-09 11 -448 val_448 2008-04-09 12 -449 val_449 2008-04-08 11 -449 val_449 2008-04-08 12 -449 val_449 2008-04-09 11 -449 val_449 2008-04-09 12 -452 val_452 2008-04-08 11 -452 val_452 2008-04-08 12 -452 val_452 2008-04-09 11 -452 val_452 2008-04-09 12 -453 val_453 2008-04-08 11 -453 val_453 2008-04-08 12 -453 val_453 2008-04-09 11 -453 val_453 2008-04-09 12 -454 val_454 2008-04-08 11 -454 val_454 2008-04-08 11 -454 val_454 2008-04-08 11 -454 val_454 2008-04-08 12 -454 val_454 2008-04-08 12 -454 val_454 2008-04-08 12 -454 val_454 2008-04-09 11 -454 val_454 2008-04-09 11 -454 val_454 2008-04-09 11 -454 val_454 2008-04-09 12 -454 val_454 2008-04-09 12 -454 val_454 2008-04-09 12 -455 val_455 
2008-04-08 11 -455 val_455 2008-04-08 12 -455 val_455 2008-04-09 11 -455 val_455 2008-04-09 12 -457 val_457 2008-04-08 11 -457 val_457 2008-04-08 12 -457 val_457 2008-04-09 11 -457 val_457 2008-04-09 12 -458 val_458 2008-04-08 11 -458 val_458 2008-04-08 11 -458 val_458 2008-04-08 12 -458 val_458 2008-04-08 12 -458 val_458 2008-04-09 11 -458 val_458 2008-04-09 11 -458 val_458 2008-04-09 12 -458 val_458 2008-04-09 12 -459 val_459 2008-04-08 11 -459 val_459 2008-04-08 11 -459 val_459 2008-04-08 12 -459 val_459 2008-04-08 12 -459 val_459 2008-04-09 11 -459 val_459 2008-04-09 11 -459 val_459 2008-04-09 12 -459 val_459 2008-04-09 12 -460 val_460 2008-04-08 11 -460 val_460 2008-04-08 12 -460 val_460 2008-04-09 11 -460 val_460 2008-04-09 12 -462 val_462 2008-04-08 11 -462 val_462 2008-04-08 11 -462 val_462 2008-04-08 12 -462 val_462 2008-04-08 12 -462 val_462 2008-04-09 11 -462 val_462 2008-04-09 11 -462 val_462 2008-04-09 12 -462 val_462 2008-04-09 12 -463 val_463 2008-04-08 11 -463 val_463 2008-04-08 11 -463 val_463 2008-04-08 12 -463 val_463 2008-04-08 12 -463 val_463 2008-04-09 11 -463 val_463 2008-04-09 11 -463 val_463 2008-04-09 12 -463 val_463 2008-04-09 12 -466 val_466 2008-04-08 11 -466 val_466 2008-04-08 11 -466 val_466 2008-04-08 11 -466 val_466 2008-04-08 12 -466 val_466 2008-04-08 12 -466 val_466 2008-04-08 12 -466 val_466 2008-04-09 11 -466 val_466 2008-04-09 11 -466 val_466 2008-04-09 11 -466 val_466 2008-04-09 12 -466 val_466 2008-04-09 12 -466 val_466 2008-04-09 12 -467 val_467 2008-04-08 11 -467 val_467 2008-04-08 12 -467 val_467 2008-04-09 11 -467 val_467 2008-04-09 12 -468 val_468 2008-04-08 11 -468 val_468 2008-04-08 11 -468 val_468 2008-04-08 11 -468 val_468 2008-04-08 11 -468 val_468 2008-04-08 12 -468 val_468 2008-04-08 12 -468 val_468 2008-04-08 12 -468 val_468 2008-04-08 12 -468 val_468 2008-04-09 11 -468 val_468 2008-04-09 11 -468 val_468 2008-04-09 11 -468 val_468 2008-04-09 11 -468 val_468 2008-04-09 12 -468 val_468 2008-04-09 12 -468 val_468 2008-04-09 12 -468 val_468 2008-04-09 12 -469 val_469 2008-04-08 11 -469 val_469 2008-04-08 11 -469 val_469 2008-04-08 11 -469 val_469 2008-04-08 11 -469 val_469 2008-04-08 11 -469 val_469 2008-04-08 12 -469 val_469 2008-04-08 12 -469 val_469 2008-04-08 12 -469 val_469 2008-04-08 12 -469 val_469 2008-04-08 12 -469 val_469 2008-04-09 11 -469 val_469 2008-04-09 11 -469 val_469 2008-04-09 11 -469 val_469 2008-04-09 11 -469 val_469 2008-04-09 11 -469 val_469 2008-04-09 12 -469 val_469 2008-04-09 12 -469 val_469 2008-04-09 12 -469 val_469 2008-04-09 12 -469 val_469 2008-04-09 12 -47 val_47 2008-04-08 11 -47 val_47 2008-04-08 12 -47 val_47 2008-04-09 11 -47 val_47 2008-04-09 12 -470 val_470 2008-04-08 11 -470 val_470 2008-04-08 12 -470 val_470 2008-04-09 11 -470 val_470 2008-04-09 12 -472 val_472 2008-04-08 11 -472 val_472 2008-04-08 12 -472 val_472 2008-04-09 11 -472 val_472 2008-04-09 12 -475 val_475 2008-04-08 11 -475 val_475 2008-04-08 12 -475 val_475 2008-04-09 11 -475 val_475 2008-04-09 12 -477 val_477 2008-04-08 11 -477 val_477 2008-04-08 12 -477 val_477 2008-04-09 11 -477 val_477 2008-04-09 12 -478 val_478 2008-04-08 11 -478 val_478 2008-04-08 11 -478 val_478 2008-04-08 12 -478 val_478 2008-04-08 12 -478 val_478 2008-04-09 11 -478 val_478 2008-04-09 11 -478 val_478 2008-04-09 12 -478 val_478 2008-04-09 12 -479 val_479 2008-04-08 11 -479 val_479 2008-04-08 12 -479 val_479 2008-04-09 11 -479 val_479 2008-04-09 12 -480 val_480 2008-04-08 11 -480 val_480 2008-04-08 11 -480 val_480 2008-04-08 11 -480 val_480 2008-04-08 12 -480 
val_480 2008-04-08 12 -480 val_480 2008-04-08 12 -480 val_480 2008-04-09 11 -480 val_480 2008-04-09 11 -480 val_480 2008-04-09 11 -480 val_480 2008-04-09 12 -480 val_480 2008-04-09 12 -480 val_480 2008-04-09 12 -481 val_481 2008-04-08 11 -481 val_481 2008-04-08 12 -481 val_481 2008-04-09 11 -481 val_481 2008-04-09 12 -482 val_482 2008-04-08 11 -482 val_482 2008-04-08 12 -482 val_482 2008-04-09 11 -482 val_482 2008-04-09 12 -483 val_483 2008-04-08 11 -483 val_483 2008-04-08 12 -483 val_483 2008-04-09 11 -483 val_483 2008-04-09 12 -484 val_484 2008-04-08 11 -484 val_484 2008-04-08 12 -484 val_484 2008-04-09 11 -484 val_484 2008-04-09 12 -485 val_485 2008-04-08 11 -485 val_485 2008-04-08 12 -485 val_485 2008-04-09 11 -485 val_485 2008-04-09 12 -487 val_487 2008-04-08 11 -487 val_487 2008-04-08 12 -487 val_487 2008-04-09 11 -487 val_487 2008-04-09 12 -489 val_489 2008-04-08 11 -489 val_489 2008-04-08 11 -489 val_489 2008-04-08 11 -489 val_489 2008-04-08 11 -489 val_489 2008-04-08 12 -489 val_489 2008-04-08 12 -489 val_489 2008-04-08 12 -489 val_489 2008-04-08 12 -489 val_489 2008-04-09 11 -489 val_489 2008-04-09 11 -489 val_489 2008-04-09 11 -489 val_489 2008-04-09 11 -489 val_489 2008-04-09 12 -489 val_489 2008-04-09 12 -489 val_489 2008-04-09 12 -489 val_489 2008-04-09 12 -490 val_490 2008-04-08 11 -490 val_490 2008-04-08 12 -490 val_490 2008-04-09 11 -490 val_490 2008-04-09 12 -491 val_491 2008-04-08 11 -491 val_491 2008-04-08 12 -491 val_491 2008-04-09 11 -491 val_491 2008-04-09 12 -492 val_492 2008-04-08 11 -492 val_492 2008-04-08 11 -492 val_492 2008-04-08 12 -492 val_492 2008-04-08 12 -492 val_492 2008-04-09 11 -492 val_492 2008-04-09 11 -492 val_492 2008-04-09 12 -492 val_492 2008-04-09 12 -493 val_493 2008-04-08 11 -493 val_493 2008-04-08 12 -493 val_493 2008-04-09 11 -493 val_493 2008-04-09 12 -494 val_494 2008-04-08 11 -494 val_494 2008-04-08 12 -494 val_494 2008-04-09 11 -494 val_494 2008-04-09 12 -495 val_495 2008-04-08 11 -495 val_495 2008-04-08 12 -495 val_495 2008-04-09 11 -495 val_495 2008-04-09 12 -496 val_496 2008-04-08 11 -496 val_496 2008-04-08 12 -496 val_496 2008-04-09 11 -496 val_496 2008-04-09 12 -497 val_497 2008-04-08 11 -497 val_497 2008-04-08 12 -497 val_497 2008-04-09 11 -497 val_497 2008-04-09 12 -498 val_498 2008-04-08 11 -498 val_498 2008-04-08 11 -498 val_498 2008-04-08 11 -498 val_498 2008-04-08 12 -498 val_498 2008-04-08 12 -498 val_498 2008-04-08 12 -498 val_498 2008-04-09 11 -498 val_498 2008-04-09 11 -498 val_498 2008-04-09 11 -498 val_498 2008-04-09 12 -498 val_498 2008-04-09 12 -498 val_498 2008-04-09 12 -5 val_5 2008-04-08 11 -5 val_5 2008-04-08 11 -5 val_5 2008-04-08 11 -5 val_5 2008-04-08 12 -5 val_5 2008-04-08 12 -5 val_5 2008-04-08 12 -5 val_5 2008-04-09 11 -5 val_5 2008-04-09 11 -5 val_5 2008-04-09 11 -5 val_5 2008-04-09 12 -5 val_5 2008-04-09 12 -5 val_5 2008-04-09 12 -51 val_51 2008-04-08 11 -51 val_51 2008-04-08 11 -51 val_51 2008-04-08 12 -51 val_51 2008-04-08 12 -51 val_51 2008-04-09 11 -51 val_51 2008-04-09 11 -51 val_51 2008-04-09 12 -51 val_51 2008-04-09 12 -53 val_53 2008-04-08 11 -53 val_53 2008-04-08 12 -53 val_53 2008-04-09 11 -53 val_53 2008-04-09 12 -54 val_54 2008-04-08 11 -54 val_54 2008-04-08 12 -54 val_54 2008-04-09 11 -54 val_54 2008-04-09 12 -57 val_57 2008-04-08 11 -57 val_57 2008-04-08 12 -57 val_57 2008-04-09 11 -57 val_57 2008-04-09 12 -58 val_58 2008-04-08 11 -58 val_58 2008-04-08 11 -58 val_58 2008-04-08 12 -58 val_58 2008-04-08 12 -58 val_58 2008-04-09 11 -58 val_58 2008-04-09 11 -58 val_58 2008-04-09 12 -58 val_58 
2008-04-09 12 -64 val_64 2008-04-08 11 -64 val_64 2008-04-08 12 -64 val_64 2008-04-09 11 -64 val_64 2008-04-09 12 -65 val_65 2008-04-08 11 -65 val_65 2008-04-08 12 -65 val_65 2008-04-09 11 -65 val_65 2008-04-09 12 -66 val_66 2008-04-08 11 -66 val_66 2008-04-08 12 -66 val_66 2008-04-09 11 -66 val_66 2008-04-09 12 -67 val_67 2008-04-08 11 -67 val_67 2008-04-08 11 -67 val_67 2008-04-08 12 -67 val_67 2008-04-08 12 -67 val_67 2008-04-09 11 -67 val_67 2008-04-09 11 -67 val_67 2008-04-09 12 -67 val_67 2008-04-09 12 -69 val_69 2008-04-08 11 -69 val_69 2008-04-08 12 -69 val_69 2008-04-09 11 -69 val_69 2008-04-09 12 -70 val_70 2008-04-08 11 -70 val_70 2008-04-08 11 -70 val_70 2008-04-08 11 -70 val_70 2008-04-08 12 -70 val_70 2008-04-08 12 -70 val_70 2008-04-08 12 -70 val_70 2008-04-09 11 -70 val_70 2008-04-09 11 -70 val_70 2008-04-09 11 -70 val_70 2008-04-09 12 -70 val_70 2008-04-09 12 -70 val_70 2008-04-09 12 -72 val_72 2008-04-08 11 -72 val_72 2008-04-08 11 -72 val_72 2008-04-08 12 -72 val_72 2008-04-08 12 -72 val_72 2008-04-09 11 -72 val_72 2008-04-09 11 -72 val_72 2008-04-09 12 -72 val_72 2008-04-09 12 -74 val_74 2008-04-08 11 -74 val_74 2008-04-08 12 -74 val_74 2008-04-09 11 -74 val_74 2008-04-09 12 -76 val_76 2008-04-08 11 -76 val_76 2008-04-08 11 -76 val_76 2008-04-08 12 -76 val_76 2008-04-08 12 -76 val_76 2008-04-09 11 -76 val_76 2008-04-09 11 -76 val_76 2008-04-09 12 -76 val_76 2008-04-09 12 -77 val_77 2008-04-08 11 -77 val_77 2008-04-08 12 -77 val_77 2008-04-09 11 -77 val_77 2008-04-09 12 -78 val_78 2008-04-08 11 -78 val_78 2008-04-08 12 -78 val_78 2008-04-09 11 -78 val_78 2008-04-09 12 -8 val_8 2008-04-08 11 -8 val_8 2008-04-08 12 -8 val_8 2008-04-09 11 -8 val_8 2008-04-09 12 -80 val_80 2008-04-08 11 -80 val_80 2008-04-08 12 -80 val_80 2008-04-09 11 -80 val_80 2008-04-09 12 -82 val_82 2008-04-08 11 -82 val_82 2008-04-08 12 -82 val_82 2008-04-09 11 -82 val_82 2008-04-09 12 -83 val_83 2008-04-08 11 -83 val_83 2008-04-08 11 -83 val_83 2008-04-08 12 -83 val_83 2008-04-08 12 -83 val_83 2008-04-09 11 -83 val_83 2008-04-09 11 -83 val_83 2008-04-09 12 -83 val_83 2008-04-09 12 -84 val_84 2008-04-08 11 -84 val_84 2008-04-08 11 -84 val_84 2008-04-08 12 -84 val_84 2008-04-08 12 -84 val_84 2008-04-09 11 -84 val_84 2008-04-09 11 -84 val_84 2008-04-09 12 -84 val_84 2008-04-09 12 -85 val_85 2008-04-08 11 -85 val_85 2008-04-08 12 -85 val_85 2008-04-09 11 -85 val_85 2008-04-09 12 -86 val_86 2008-04-08 11 -86 val_86 2008-04-08 12 -86 val_86 2008-04-09 11 -86 val_86 2008-04-09 12 -87 val_87 2008-04-08 11 -87 val_87 2008-04-08 12 -87 val_87 2008-04-09 11 -87 val_87 2008-04-09 12 -9 val_9 2008-04-08 11 -9 val_9 2008-04-08 12 -9 val_9 2008-04-09 11 -9 val_9 2008-04-09 12 -90 val_90 2008-04-08 11 -90 val_90 2008-04-08 11 -90 val_90 2008-04-08 11 -90 val_90 2008-04-08 12 -90 val_90 2008-04-08 12 -90 val_90 2008-04-08 12 -90 val_90 2008-04-09 11 -90 val_90 2008-04-09 11 -90 val_90 2008-04-09 11 -90 val_90 2008-04-09 12 -90 val_90 2008-04-09 12 -90 val_90 2008-04-09 12 -92 val_92 2008-04-08 11 -92 val_92 2008-04-08 12 -92 val_92 2008-04-09 11 -92 val_92 2008-04-09 12 -95 val_95 2008-04-08 11 -95 val_95 2008-04-08 11 -95 val_95 2008-04-08 12 -95 val_95 2008-04-08 12 -95 val_95 2008-04-09 11 -95 val_95 2008-04-09 11 -95 val_95 2008-04-09 12 -95 val_95 2008-04-09 12 -96 val_96 2008-04-08 11 -96 val_96 2008-04-08 12 -96 val_96 2008-04-09 11 -96 val_96 2008-04-09 12 -97 val_97 2008-04-08 11 -97 val_97 2008-04-08 11 -97 val_97 2008-04-08 12 -97 val_97 2008-04-08 12 -97 val_97 2008-04-09 11 -97 val_97 2008-04-09 11 -97 
val_97 2008-04-09 12 -97 val_97 2008-04-09 12 -98 val_98 2008-04-08 11 -98 val_98 2008-04-08 11 -98 val_98 2008-04-08 12 -98 val_98 2008-04-08 12 -98 val_98 2008-04-09 11 -98 val_98 2008-04-09 11 -98 val_98 2008-04-09 12 -98 val_98 2008-04-09 12 diff --git ql/src/test/results/clientpositive/llap/materialized_view_sort.q.out ql/src/test/results/clientpositive/llap/materialized_view_sort.q.out deleted file mode 100644 index d610468064..0000000000 --- ql/src/test/results/clientpositive/llap/materialized_view_sort.q.out +++ /dev/null @@ -1,909 +0,0 @@ -PREHOOK: query: CREATE TABLE src_txn stored as orc TBLPROPERTIES ('transactional' = 'true') -AS SELECT * FROM src -PREHOOK: type: CREATETABLE_AS_SELECT -PREHOOK: Input: default@src -PREHOOK: Output: database:default -PREHOOK: Output: default@src_txn -POSTHOOK: query: CREATE TABLE src_txn stored as orc TBLPROPERTIES ('transactional' = 'true') -AS SELECT * FROM src -POSTHOOK: type: CREATETABLE_AS_SELECT -POSTHOOK: Input: default@src -POSTHOOK: Output: database:default -POSTHOOK: Output: default@src_txn -POSTHOOK: Lineage: src_txn.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: src_txn.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: EXPLAIN -CREATE MATERIALIZED VIEW sort_mv_1 SORTED ON (key) STORED AS TEXTFILE AS -SELECT value, key FROM src_txn where key > 200 and key < 250 -PREHOOK: type: CREATE_MATERIALIZED_VIEW -PREHOOK: Input: default@src_txn -PREHOOK: Output: database:default -PREHOOK: Output: default@sort_mv_1 -POSTHOOK: query: EXPLAIN -CREATE MATERIALIZED VIEW sort_mv_1 SORTED ON (key) STORED AS TEXTFILE AS -SELECT value, key FROM src_txn where key > 200 and key < 250 -POSTHOOK: type: CREATE_MATERIALIZED_VIEW -POSTHOOK: Input: default@src_txn -POSTHOOK: Output: database:default -POSTHOOK: Output: default@sort_mv_1 -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-2 depends on stages: Stage-1 - Stage-4 depends on stages: Stage-0, Stage-2 - Stage-3 depends on stages: Stage-4 - Stage-5 depends on stages: Stage-3 - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-1 - Tez -#### A masked pattern was here #### - Edges: - Reducer 2 <- Map 1 (SIMPLE_EDGE) - Reducer 3 <- Reducer 2 (CUSTOM_SIMPLE_EDGE) -#### A masked pattern was here #### - Vertices: - Map 1 - Map Operator Tree: - TableScan - alias: src_txn - filterExpr: ((UDFToDouble(key) > 200.0D) and (UDFToDouble(key) < 250.0D)) (type: boolean) - Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator - predicate: ((UDFToDouble(key) > 200.0D) and (UDFToDouble(key) < 250.0D)) (type: boolean) - Statistics: Num rows: 55 Data size: 9790 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: value (type: string), key (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 55 Data size: 9790 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col1 (type: string) - sort order: + - Statistics: Num rows: 55 Data size: 9790 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col0 (type: string) - Execution mode: vectorized, llap - LLAP IO: may be used (ACID table) - Reducer 2 - Execution mode: llap - Reduce Operator Tree: - Select Operator - expressions: VALUE._col0 (type: string), KEY._col1 (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 55 Data size: 9790 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - 
compressed: false - Statistics: Num rows: 55 Data size: 9790 Basic stats: COMPLETE Column stats: COMPLETE - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.sort_mv_1 - Select Operator - expressions: _col0 (type: string), _col1 (type: string) - outputColumnNames: col1, col2 - Statistics: Num rows: 55 Data size: 9790 Basic stats: COMPLETE Column stats: COMPLETE - Group By Operator - aggregations: compute_stats(col1, 'hll'), compute_stats(col2, 'hll') - minReductionHashAggr: 0.9818182 - mode: hash - outputColumnNames: _col0, _col1 - Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - sort order: - Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col0 (type: struct), _col1 (type: struct) - Reducer 3 - Execution mode: llap - Reduce Operator Tree: - Group By Operator - aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1) - mode: mergepartial - outputColumnNames: _col0, _col1 - Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - Stage: Stage-2 - Dependency Collection - - Stage: Stage-4 - Create View - columns: value string, key string - sort columns: key string - expanded text: SELECT `src_txn`.`value`, `src_txn`.`key` FROM `default`.`src_txn` where `src_txn`.`key` > 200 and `src_txn`.`key` < 250 - name: default.sort_mv_1 - original text: SELECT value, key FROM src_txn where key > 200 and key < 250 - rewrite enabled: true - - Stage: Stage-3 - Stats Work - Basic Stats Work: - Column Stats Desc: - Columns: value, key - Column Types: string, string - Table: default.sort_mv_1 - - Stage: Stage-5 - Materialized View Work - - Stage: Stage-0 - Move Operator - files: - hdfs directory: true -#### A masked pattern was here #### - -PREHOOK: query: CREATE MATERIALIZED VIEW sort_mv_1 SORTED ON (key) STORED AS TEXTFILE AS -SELECT value, key FROM src_txn where key > 200 and key < 250 -PREHOOK: type: CREATE_MATERIALIZED_VIEW -PREHOOK: Input: default@src_txn -PREHOOK: Output: database:default -PREHOOK: Output: default@sort_mv_1 -POSTHOOK: query: CREATE MATERIALIZED VIEW sort_mv_1 SORTED ON (key) STORED AS TEXTFILE AS -SELECT value, key FROM src_txn where key > 200 and key < 250 -POSTHOOK: type: CREATE_MATERIALIZED_VIEW -POSTHOOK: Input: default@src_txn -POSTHOOK: Output: database:default -POSTHOOK: Output: default@sort_mv_1 -PREHOOK: query: DESCRIBE FORMATTED sort_mv_1 -PREHOOK: type: DESCTABLE -PREHOOK: Input: default@sort_mv_1 -POSTHOOK: query: DESCRIBE FORMATTED sort_mv_1 -POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@sort_mv_1 -# col_name data_type comment -value string -key string - -# Detailed Table Information -Database: default -#### A masked pattern was here #### -Retention: 0 -#### A masked pattern was here #### -Table Type: MATERIALIZED_VIEW -Table Parameters: - COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}} - bucketing_version 2 - materializedview.sort.columns [\"key\"] - 
numFiles 1 - numRows 55 - rawDataSize 605 - totalSize 660 -#### A masked pattern was here #### - -# Storage Information -SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -InputFormat: org.apache.hadoop.mapred.TextInputFormat -OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat -Compressed: No -Num Buckets: -1 -Bucket Columns: [] -Sort Columns: [] - -# Materialized View Information -Original Query: SELECT value, key FROM src_txn where key > 200 and key < 250 -Expanded Query: SELECT `src_txn`.`value`, `src_txn`.`key` FROM `default`.`src_txn` where `src_txn`.`key` > 200 and `src_txn`.`key` < 250 -Rewrite Enabled: Yes -Outdated for Rewriting: No -Found 1 items -#### A masked pattern was here #### -val_201201 -val_202202 -val_203203 -val_203203 -val_205205 -val_205205 -val_207207 -val_207207 -val_208208 -val_208208 -val_208208 -val_209209 -val_209209 -val_213213 -val_213213 -val_214214 -val_216216 -val_216216 -val_217217 -val_217217 -val_218218 -val_219219 -val_219219 -val_221221 -val_221221 -val_222222 -val_223223 -val_223223 -val_224224 -val_224224 -val_226226 -val_228228 -val_229229 -val_229229 -val_230230 -val_230230 -val_230230 -val_230230 -val_230230 -val_233233 -val_233233 -val_235235 -val_237237 -val_237237 -val_238238 -val_238238 -val_239239 -val_239239 -val_241241 -val_242242 -val_242242 -val_244244 -val_247247 -val_248248 -val_249249 -PREHOOK: query: EXPLAIN -SELECT * FROM sort_mv_1 where key = 238 -PREHOOK: type: QUERY -PREHOOK: Input: default@sort_mv_1 -#### A masked pattern was here #### -POSTHOOK: query: EXPLAIN -SELECT * FROM sort_mv_1 where key = 238 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@sort_mv_1 -#### A masked pattern was here #### -STAGE DEPENDENCIES: - Stage-0 is a root stage - -STAGE PLANS: - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - TableScan - alias: sort_mv_1 - filterExpr: (UDFToDouble(key) = 238.0D) (type: boolean) - Filter Operator - predicate: (UDFToDouble(key) = 238.0D) (type: boolean) - Select Operator - expressions: value (type: string), key (type: string) - outputColumnNames: _col0, _col1 - ListSink - -PREHOOK: query: SELECT * FROM sort_mv_1 where key = 238 -PREHOOK: type: QUERY -PREHOOK: Input: default@sort_mv_1 -#### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM sort_mv_1 where key = 238 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@sort_mv_1 -#### A masked pattern was here #### -val_238 238 -val_238 238 -PREHOOK: query: CREATE MATERIALIZED VIEW sort_mv_2 SORTED ON (value) STORED AS TEXTFILE AS -SELECT key, value FROM src_txn where key > 200 and key < 250 -PREHOOK: type: CREATE_MATERIALIZED_VIEW -PREHOOK: Input: default@src_txn -PREHOOK: Output: database:default -PREHOOK: Output: default@sort_mv_2 -POSTHOOK: query: CREATE MATERIALIZED VIEW sort_mv_2 SORTED ON (value) STORED AS TEXTFILE AS -SELECT key, value FROM src_txn where key > 200 and key < 250 -POSTHOOK: type: CREATE_MATERIALIZED_VIEW -POSTHOOK: Input: default@src_txn -POSTHOOK: Output: database:default -POSTHOOK: Output: default@sort_mv_2 -Found 1 items -#### A masked pattern was here #### -201val_201 -202val_202 -203val_203 -203val_203 -205val_205 -205val_205 -207val_207 -207val_207 -208val_208 -208val_208 -208val_208 -209val_209 -209val_209 -213val_213 -213val_213 -214val_214 -216val_216 -216val_216 -217val_217 -217val_217 -218val_218 -219val_219 -219val_219 -221val_221 -221val_221 -222val_222 -223val_223 -223val_223 -224val_224 -224val_224 -226val_226 -228val_228 -229val_229 -229val_229 -230val_230 -230val_230 
-230val_230 -230val_230 -230val_230 -233val_233 -233val_233 -235val_235 -237val_237 -237val_237 -238val_238 -238val_238 -239val_239 -239val_239 -241val_241 -242val_242 -242val_242 -244val_244 -247val_247 -248val_248 -249val_249 -PREHOOK: query: EXPLAIN -SELECT * FROM sort_mv_2 where value = 'val_238' -PREHOOK: type: QUERY -PREHOOK: Input: default@sort_mv_2 -#### A masked pattern was here #### -POSTHOOK: query: EXPLAIN -SELECT * FROM sort_mv_2 where value = 'val_238' -POSTHOOK: type: QUERY -POSTHOOK: Input: default@sort_mv_2 -#### A masked pattern was here #### -STAGE DEPENDENCIES: - Stage-0 is a root stage - -STAGE PLANS: - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - TableScan - alias: sort_mv_2 - filterExpr: (value = 'val_238') (type: boolean) - Filter Operator - predicate: (value = 'val_238') (type: boolean) - Select Operator - expressions: key (type: string), 'val_238' (type: string) - outputColumnNames: _col0, _col1 - ListSink - -PREHOOK: query: SELECT * FROM sort_mv_2 where value = 'val_238' -PREHOOK: type: QUERY -PREHOOK: Input: default@sort_mv_2 -#### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM sort_mv_2 where value = 'val_238' -POSTHOOK: type: QUERY -POSTHOOK: Input: default@sort_mv_2 -#### A masked pattern was here #### -238 val_238 -238 val_238 -PREHOOK: query: EXPLAIN -SELECT value FROM sort_mv_2 where key = 238 -PREHOOK: type: QUERY -PREHOOK: Input: default@sort_mv_2 -#### A masked pattern was here #### -POSTHOOK: query: EXPLAIN -SELECT value FROM sort_mv_2 where key = 238 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@sort_mv_2 -#### A masked pattern was here #### -STAGE DEPENDENCIES: - Stage-0 is a root stage - -STAGE PLANS: - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - TableScan - alias: sort_mv_2 - filterExpr: (UDFToDouble(key) = 238.0D) (type: boolean) - Filter Operator - predicate: (UDFToDouble(key) = 238.0D) (type: boolean) - Select Operator - expressions: value (type: string) - outputColumnNames: _col0 - ListSink - -PREHOOK: query: SELECT value FROM sort_mv_2 where key = 238 -PREHOOK: type: QUERY -PREHOOK: Input: default@sort_mv_2 -#### A masked pattern was here #### -POSTHOOK: query: SELECT value FROM sort_mv_2 where key = 238 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@sort_mv_2 -#### A masked pattern was here #### -val_238 -val_238 -PREHOOK: query: INSERT INTO src_txn VALUES (238, 'val_238_n') -PREHOOK: type: QUERY -PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@src_txn -POSTHOOK: query: INSERT INTO src_txn VALUES (238, 'val_238_n') -POSTHOOK: type: QUERY -POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@src_txn -POSTHOOK: Lineage: src_txn.key SCRIPT [] -POSTHOOK: Lineage: src_txn.value SCRIPT [] -PREHOOK: query: EXPLAIN -ALTER MATERIALIZED VIEW sort_mv_1 REBUILD -PREHOOK: type: QUERY -PREHOOK: Input: default@src_txn -PREHOOK: Output: default@sort_mv_1 -POSTHOOK: query: EXPLAIN -ALTER MATERIALIZED VIEW sort_mv_1 REBUILD -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src_txn -POSTHOOK: Output: default@sort_mv_1 -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-2 depends on stages: Stage-1 - Stage-0 depends on stages: Stage-2 - Stage-3 depends on stages: Stage-0 - Stage-4 depends on stages: Stage-3 - -STAGE PLANS: - Stage: Stage-1 - Tez -#### A masked pattern was here #### - Edges: - Reducer 2 <- Map 1 (CUSTOM_SIMPLE_EDGE) -#### A masked pattern was here #### - Vertices: - Map 1 - Map Operator Tree: - TableScan - alias: src_txn - filterExpr: 
((ROW__ID.writeid > 1L) and (UDFToDouble(key) > 200.0D) and (UDFToDouble(key) < 250.0D)) (type: boolean) - Statistics: Num rows: 501 Data size: 90180 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator - predicate: ((ROW__ID.writeid > 1L) and (UDFToDouble(key) > 200.0D) and (UDFToDouble(key) < 250.0D)) (type: boolean) - Statistics: Num rows: 18 Data size: 3240 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: value (type: string), key (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 18 Data size: 3240 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: Num rows: 18 Data size: 3240 Basic stats: COMPLETE Column stats: COMPLETE - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.sort_mv_1 - Select Operator - expressions: _col0 (type: string), _col1 (type: string) - outputColumnNames: value, key - Statistics: Num rows: 18 Data size: 3240 Basic stats: COMPLETE Column stats: COMPLETE - Group By Operator - aggregations: compute_stats(value, 'hll'), compute_stats(key, 'hll') - minReductionHashAggr: 0.9444444 - mode: hash - outputColumnNames: _col0, _col1 - Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - sort order: - Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col0 (type: struct), _col1 (type: struct) - Execution mode: llap - LLAP IO: may be used (ACID table) - Reducer 2 - Execution mode: llap - Reduce Operator Tree: - Group By Operator - aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1) - mode: mergepartial - outputColumnNames: _col0, _col1 - Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - Stage: Stage-2 - Dependency Collection - - Stage: Stage-0 - Move Operator - tables: - replace: false - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.sort_mv_1 - - Stage: Stage-3 - Stats Work - Basic Stats Work: - Column Stats Desc: - Columns: value, key - Column Types: string, string - Table: default.sort_mv_1 - - Stage: Stage-4 - Materialized View Work - -PREHOOK: query: ALTER MATERIALIZED VIEW sort_mv_1 REBUILD -PREHOOK: type: QUERY -PREHOOK: Input: default@src_txn -PREHOOK: Output: default@sort_mv_1 -POSTHOOK: query: ALTER MATERIALIZED VIEW sort_mv_1 REBUILD -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src_txn -POSTHOOK: Output: default@sort_mv_1 -POSTHOOK: Lineage: sort_mv_1.key SIMPLE [(src_txn)src_txn.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: sort_mv_1.value SIMPLE [(src_txn)src_txn.FieldSchema(name:value, type:string, comment:null), ] -PREHOOK: query: SELECT * FROM sort_mv_1 where key = 238 -PREHOOK: type: QUERY -PREHOOK: Input: default@sort_mv_1 -#### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM 
sort_mv_1 where key = 238 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@sort_mv_1 -#### A masked pattern was here #### -val_238 238 -val_238 238 -val_238_n 238 -PREHOOK: query: SELECT * FROM sort_mv_2 where key = 238 -PREHOOK: type: QUERY -PREHOOK: Input: default@sort_mv_2 -#### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM sort_mv_2 where key = 238 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@sort_mv_2 -#### A masked pattern was here #### -238 val_238 -238 val_238 -PREHOOK: query: CREATE TABLE src_txn_2 stored as orc TBLPROPERTIES ('transactional' = 'true') -AS SELECT * FROM src -PREHOOK: type: CREATETABLE_AS_SELECT -PREHOOK: Input: default@src -PREHOOK: Output: database:default -PREHOOK: Output: default@src_txn_2 -POSTHOOK: query: CREATE TABLE src_txn_2 stored as orc TBLPROPERTIES ('transactional' = 'true') -AS SELECT * FROM src -POSTHOOK: type: CREATETABLE_AS_SELECT -POSTHOOK: Input: default@src -POSTHOOK: Output: database:default -POSTHOOK: Output: default@src_txn_2 -POSTHOOK: Lineage: src_txn_2.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: src_txn_2.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: CREATE MATERIALIZED VIEW sort_mv_3 SORTED ON (key) STORED AS TEXTFILE AS -SELECT src_txn.value, src_txn.key FROM src_txn, src_txn_2 -WHERE src_txn.key = src_txn_2.key - AND src_txn.key > 200 AND src_txn.key < 250 -PREHOOK: type: CREATE_MATERIALIZED_VIEW -PREHOOK: Input: default@src_txn -PREHOOK: Input: default@src_txn_2 -PREHOOK: Output: database:default -PREHOOK: Output: default@sort_mv_3 -POSTHOOK: query: CREATE MATERIALIZED VIEW sort_mv_3 SORTED ON (key) STORED AS TEXTFILE AS -SELECT src_txn.value, src_txn.key FROM src_txn, src_txn_2 -WHERE src_txn.key = src_txn_2.key - AND src_txn.key > 200 AND src_txn.key < 250 -POSTHOOK: type: CREATE_MATERIALIZED_VIEW -POSTHOOK: Input: default@src_txn -POSTHOOK: Input: default@src_txn_2 -POSTHOOK: Output: database:default -POSTHOOK: Output: default@sort_mv_3 -Found 1 items -#### A masked pattern was here #### -val_201201 -val_202202 -val_203203 -val_203203 -val_203203 -val_203203 -val_205205 -val_205205 -val_205205 -val_205205 -val_207207 -val_207207 -val_207207 -val_207207 -val_208208 -val_208208 -val_208208 -val_208208 -val_208208 -val_208208 -val_208208 -val_208208 -val_208208 -val_209209 -val_209209 -val_209209 -val_209209 -val_213213 -val_213213 -val_213213 -val_213213 -val_214214 -val_216216 -val_216216 -val_216216 -val_216216 -val_217217 -val_217217 -val_217217 -val_217217 -val_218218 -val_219219 -val_219219 -val_219219 -val_219219 -val_221221 -val_221221 -val_221221 -val_221221 -val_222222 -val_223223 -val_223223 -val_223223 -val_223223 -val_224224 -val_224224 -val_224224 -val_224224 -val_226226 -val_228228 -val_229229 -val_229229 -val_229229 -val_229229 -val_230230 -val_230230 -val_230230 -val_230230 -val_230230 -val_230230 -val_230230 -val_230230 -val_230230 -val_230230 -val_230230 -val_230230 -val_230230 -val_230230 -val_230230 -val_230230 -val_230230 -val_230230 -val_230230 -val_230230 -val_230230 -val_230230 -val_230230 -val_230230 -val_230230 -val_233233 -val_233233 -val_233233 -val_233233 -val_235235 -val_237237 -val_237237 -val_237237 -val_237237 -val_238_n238 -val_238238 -val_238238 -val_238238 -val_238238 -val_238_n238 -val_239239 -val_239239 -val_239239 -val_239239 -val_241241 -val_242242 -val_242242 -val_242242 -val_242242 -val_244244 -val_247247 -val_248248 -val_249249 -PREHOOK: query: INSERT INTO src_txn VALUES 
(238, 'val_238_n2') -PREHOOK: type: QUERY -PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@src_txn -POSTHOOK: query: INSERT INTO src_txn VALUES (238, 'val_238_n2') -POSTHOOK: type: QUERY -POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@src_txn -POSTHOOK: Lineage: src_txn.key SCRIPT [] -POSTHOOK: Lineage: src_txn.value SCRIPT [] -PREHOOK: query: EXPLAIN -ALTER MATERIALIZED VIEW sort_mv_3 REBUILD -PREHOOK: type: QUERY -PREHOOK: Input: default@src_txn -PREHOOK: Input: default@src_txn_2 -PREHOOK: Output: default@sort_mv_3 -POSTHOOK: query: EXPLAIN -ALTER MATERIALIZED VIEW sort_mv_3 REBUILD -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src_txn -POSTHOOK: Input: default@src_txn_2 -POSTHOOK: Output: default@sort_mv_3 -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-2 depends on stages: Stage-1 - Stage-0 depends on stages: Stage-2 - Stage-3 depends on stages: Stage-0 - Stage-4 depends on stages: Stage-3 - -STAGE PLANS: - Stage: Stage-1 - Tez -#### A masked pattern was here #### - Edges: - Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE) - Reducer 3 <- Reducer 2 (CUSTOM_SIMPLE_EDGE) -#### A masked pattern was here #### - Vertices: - Map 1 - Map Operator Tree: - TableScan - alias: src_txn - filterExpr: ((ROW__ID.writeid > 2L) and (UDFToDouble(key) > 200.0D) and (UDFToDouble(key) < 250.0D)) (type: boolean) - Statistics: Num rows: 502 Data size: 90862 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator - predicate: ((ROW__ID.writeid > 2L) and (UDFToDouble(key) > 200.0D) and (UDFToDouble(key) < 250.0D)) (type: boolean) - Statistics: Num rows: 18 Data size: 3258 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: key (type: string), value (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 18 Data size: 3258 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 (type: string) - sort order: + - Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 18 Data size: 3258 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col1 (type: string) - Execution mode: vectorized, llap - LLAP IO: may be used (ACID table) - Map 4 - Map Operator Tree: - TableScan - alias: src_txn_2 - filterExpr: ((UDFToDouble(key) > 200.0D) and (UDFToDouble(key) < 250.0D)) (type: boolean) - Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator - predicate: ((UDFToDouble(key) > 200.0D) and (UDFToDouble(key) < 250.0D)) (type: boolean) - Statistics: Num rows: 55 Data size: 4785 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: key (type: string) - outputColumnNames: _col0 - Statistics: Num rows: 55 Data size: 4785 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 (type: string) - sort order: + - Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 55 Data size: 4785 Basic stats: COMPLETE Column stats: COMPLETE - Execution mode: vectorized, llap - LLAP IO: may be used (ACID table) - Reducer 2 - Execution mode: llap - Reduce Operator Tree: - Merge Join Operator - condition map: - Inner Join 0 to 1 - keys: - 0 _col0 (type: string) - 1 _col0 (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 18 Data size: 3258 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: _col1 (type: string), _col0 (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 
18 Data size: 3258 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: Num rows: 18 Data size: 3258 Basic stats: COMPLETE Column stats: COMPLETE - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.sort_mv_3 - Select Operator - expressions: _col0 (type: string), _col1 (type: string) - outputColumnNames: value, key - Statistics: Num rows: 18 Data size: 3258 Basic stats: COMPLETE Column stats: COMPLETE - Group By Operator - aggregations: compute_stats(value, 'hll'), compute_stats(key, 'hll') - minReductionHashAggr: 0.9444444 - mode: hash - outputColumnNames: _col0, _col1 - Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - sort order: - Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col0 (type: struct), _col1 (type: struct) - Reducer 3 - Execution mode: llap - Reduce Operator Tree: - Group By Operator - aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1) - mode: mergepartial - outputColumnNames: _col0, _col1 - Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - Stage: Stage-2 - Dependency Collection - - Stage: Stage-0 - Move Operator - tables: - replace: false - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.sort_mv_3 - - Stage: Stage-3 - Stats Work - Basic Stats Work: - Column Stats Desc: - Columns: value, key - Column Types: string, string - Table: default.sort_mv_3 - - Stage: Stage-4 - Materialized View Work - -PREHOOK: query: ALTER MATERIALIZED VIEW sort_mv_3 REBUILD -PREHOOK: type: QUERY -PREHOOK: Input: default@src_txn -PREHOOK: Input: default@src_txn_2 -PREHOOK: Output: default@sort_mv_3 -POSTHOOK: query: ALTER MATERIALIZED VIEW sort_mv_3 REBUILD -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src_txn -POSTHOOK: Input: default@src_txn_2 -POSTHOOK: Output: default@sort_mv_3 -POSTHOOK: Lineage: sort_mv_3.key SIMPLE [(src_txn)src_txn.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: sort_mv_3.value SIMPLE [(src_txn)src_txn.FieldSchema(name:value, type:string, comment:null), ] -PREHOOK: query: SELECT * FROM sort_mv_3 where key = 238 -PREHOOK: type: QUERY -PREHOOK: Input: default@sort_mv_3 -#### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM sort_mv_3 where key = 238 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@sort_mv_3 -#### A masked pattern was here #### -val_238 238 -val_238 238 -val_238 238 -val_238 238 -val_238_n 238 -val_238_n 238 -val_238_n2 238 -val_238_n2 238 diff --git ql/src/test/results/clientpositive/llap/rfc5424_parser_file_pruning.q.out ql/src/test/results/clientpositive/llap/rfc5424_parser_file_pruning.q.out deleted file mode 100644 index 3e3ac5cf7c..0000000000 --- ql/src/test/results/clientpositive/llap/rfc5424_parser_file_pruning.q.out +++ /dev/null @@ 
-1,676 +0,0 @@ -PREHOOK: query: CREATE EXTERNAL TABLE logs2( -facility STRING, -severity STRING, -version STRING, -ts TIMESTAMP, -hostname STRING, -app_name STRING, -proc_id STRING, -msg_id STRING, -structured_data MAP, -msg BINARY, -unmatched BINARY -) -PARTITIONED BY(dt DATE,ns STRING,app STRING) -STORED BY 'org.apache.hadoop.hive.ql.log.syslog.SyslogStorageHandler' -#### A masked pattern was here #### -PREHOOK: type: CREATETABLE -#### A masked pattern was here #### -PREHOOK: Output: database:default -PREHOOK: Output: default@logs2 -POSTHOOK: query: CREATE EXTERNAL TABLE logs2( -facility STRING, -severity STRING, -version STRING, -ts TIMESTAMP, -hostname STRING, -app_name STRING, -proc_id STRING, -msg_id STRING, -structured_data MAP, -msg BINARY, -unmatched BINARY -) -PARTITIONED BY(dt DATE,ns STRING,app STRING) -STORED BY 'org.apache.hadoop.hive.ql.log.syslog.SyslogStorageHandler' -#### A masked pattern was here #### -POSTHOOK: type: CREATETABLE -#### A masked pattern was here #### -POSTHOOK: Output: database:default -POSTHOOK: Output: default@logs2 -PREHOOK: query: MSCK REPAIR TABLE logs2 -PREHOOK: type: MSCK -PREHOOK: Output: default@logs2 -POSTHOOK: query: MSCK REPAIR TABLE logs2 -POSTHOOK: type: MSCK -POSTHOOK: Output: default@logs2 -Partitions not in metastore: logs2:dt=2019-03-21/ns=foo/app=hs2 logs2:dt=2019-03-22/ns=bar/app=hs2 -#### A masked pattern was here #### -PREHOOK: query: select severity,count(*) from logs2 where ts between '2019-03-21 07:00:00.0' and '2019-03-21 07:06:00.0' group by severity -PREHOOK: type: QUERY -PREHOOK: Input: default@logs2 -PREHOOK: Input: default@logs2@dt=2019-03-21/ns=foo/app=hs2 -PREHOOK: Input: default@logs2@dt=2019-03-22/ns=bar/app=hs2 -#### A masked pattern was here #### -Stage-1 FILE SYSTEM COUNTERS: -Stage-1 HIVE COUNTERS: - DESERIALIZE_ERRORS: 0 - RECORDS_IN_Map_1: 331 - RECORDS_OUT_0: 0 - RECORDS_OUT_INTERMEDIATE_Map_1: 0 - RECORDS_OUT_INTERMEDIATE_Reducer_2: 0 - RECORDS_OUT_OPERATOR_FIL_9: 0 - RECORDS_OUT_OPERATOR_FS_14: 0 - RECORDS_OUT_OPERATOR_GBY_11: 0 - RECORDS_OUT_OPERATOR_GBY_13: 0 - RECORDS_OUT_OPERATOR_MAP_0: 0 - RECORDS_OUT_OPERATOR_RS_12: 0 - RECORDS_OUT_OPERATOR_SEL_10: 0 - RECORDS_OUT_OPERATOR_TS_0: 1087 -Stage-1 INPUT COUNTERS: - GROUPED_INPUT_SPLITS_Map_1: 1 - INPUT_DIRECTORIES_Map_1: 2 - INPUT_FILES_Map_1: 2 - RAW_INPUT_SPLITS_Map_1: 2 -PREHOOK: query: select severity,count(*) from logs2 where ts between '2019-03-21 07:06:00.0' and '2019-03-21 07:07:00.0' group by severity -PREHOOK: type: QUERY -PREHOOK: Input: default@logs2 -PREHOOK: Input: default@logs2@dt=2019-03-21/ns=foo/app=hs2 -PREHOOK: Input: default@logs2@dt=2019-03-22/ns=bar/app=hs2 -#### A masked pattern was here #### -Stage-1 FILE SYSTEM COUNTERS: -Stage-1 HIVE COUNTERS: - CREATED_FILES: 2 - DESERIALIZE_ERRORS: 0 - RECORDS_IN_Map_1: 331 - RECORDS_OUT_0: 3 - RECORDS_OUT_INTERMEDIATE_Map_1: 3 - RECORDS_OUT_INTERMEDIATE_Reducer_2: 0 - RECORDS_OUT_OPERATOR_FIL_9: 445 - RECORDS_OUT_OPERATOR_FS_14: 3 - RECORDS_OUT_OPERATOR_GBY_11: 3 - RECORDS_OUT_OPERATOR_GBY_13: 3 - RECORDS_OUT_OPERATOR_MAP_0: 0 - RECORDS_OUT_OPERATOR_RS_12: 3 - RECORDS_OUT_OPERATOR_SEL_10: 445 - RECORDS_OUT_OPERATOR_TS_0: 1087 -Stage-1 INPUT COUNTERS: - GROUPED_INPUT_SPLITS_Map_1: 1 - INPUT_DIRECTORIES_Map_1: 2 - INPUT_FILES_Map_1: 2 - RAW_INPUT_SPLITS_Map_1: 2 -DEBUG 50 -INFO 376 -WARN 19 -PREHOOK: query: select severity,count(*) from logs2 where ts between '2019-03-21 07:07:00.0' and '2019-03-21 07:08:00.0' group by severity -PREHOOK: type: QUERY -PREHOOK: Input: default@logs2 -PREHOOK: Input: 
default@logs2@dt=2019-03-21/ns=foo/app=hs2 -PREHOOK: Input: default@logs2@dt=2019-03-22/ns=bar/app=hs2 -#### A masked pattern was here #### -Stage-1 FILE SYSTEM COUNTERS: -Stage-1 HIVE COUNTERS: - CREATED_FILES: 2 - DESERIALIZE_ERRORS: 0 - RECORDS_IN_Map_1: 331 - RECORDS_OUT_0: 4 - RECORDS_OUT_INTERMEDIATE_Map_1: 4 - RECORDS_OUT_INTERMEDIATE_Reducer_2: 0 - RECORDS_OUT_OPERATOR_FIL_9: 274 - RECORDS_OUT_OPERATOR_FS_14: 4 - RECORDS_OUT_OPERATOR_GBY_11: 4 - RECORDS_OUT_OPERATOR_GBY_13: 4 - RECORDS_OUT_OPERATOR_MAP_0: 0 - RECORDS_OUT_OPERATOR_RS_12: 4 - RECORDS_OUT_OPERATOR_SEL_10: 274 - RECORDS_OUT_OPERATOR_TS_0: 1087 -Stage-1 INPUT COUNTERS: - GROUPED_INPUT_SPLITS_Map_1: 1 - INPUT_DIRECTORIES_Map_1: 2 - INPUT_FILES_Map_1: 2 - RAW_INPUT_SPLITS_Map_1: 2 -DEBUG 107 -ERROR 3 -INFO 161 -WARN 3 -PREHOOK: query: select severity,count(*) from logs2 where ts between '2019-03-21 07:08:00.0' and '2019-03-21 08:08:00.0' group by severity -PREHOOK: type: QUERY -PREHOOK: Input: default@logs2 -PREHOOK: Input: default@logs2@dt=2019-03-21/ns=foo/app=hs2 -PREHOOK: Input: default@logs2@dt=2019-03-22/ns=bar/app=hs2 -#### A masked pattern was here #### -Stage-1 FILE SYSTEM COUNTERS: -Stage-1 HIVE COUNTERS: - DESERIALIZE_ERRORS: 0 - RECORDS_IN_Map_1: 331 - RECORDS_OUT_0: 0 - RECORDS_OUT_INTERMEDIATE_Map_1: 0 - RECORDS_OUT_INTERMEDIATE_Reducer_2: 0 - RECORDS_OUT_OPERATOR_FIL_9: 0 - RECORDS_OUT_OPERATOR_FS_14: 0 - RECORDS_OUT_OPERATOR_GBY_11: 0 - RECORDS_OUT_OPERATOR_GBY_13: 0 - RECORDS_OUT_OPERATOR_MAP_0: 0 - RECORDS_OUT_OPERATOR_RS_12: 0 - RECORDS_OUT_OPERATOR_SEL_10: 0 - RECORDS_OUT_OPERATOR_TS_0: 1087 -Stage-1 INPUT COUNTERS: - GROUPED_INPUT_SPLITS_Map_1: 1 - INPUT_DIRECTORIES_Map_1: 2 - INPUT_FILES_Map_1: 2 - RAW_INPUT_SPLITS_Map_1: 2 -PREHOOK: query: select severity,count(*) from logs2 where ts between '2019-03-21 07:00:00.0' and '2019-03-21 08:00:00.0' group by severity -PREHOOK: type: QUERY -PREHOOK: Input: default@logs2 -PREHOOK: Input: default@logs2@dt=2019-03-21/ns=foo/app=hs2 -PREHOOK: Input: default@logs2@dt=2019-03-22/ns=bar/app=hs2 -#### A masked pattern was here #### -Stage-1 FILE SYSTEM COUNTERS: -Stage-1 HIVE COUNTERS: - CREATED_FILES: 2 - DESERIALIZE_ERRORS: 0 - RECORDS_IN_Map_1: 331 - RECORDS_OUT_0: 4 - RECORDS_OUT_INTERMEDIATE_Map_1: 4 - RECORDS_OUT_INTERMEDIATE_Reducer_2: 0 - RECORDS_OUT_OPERATOR_FIL_9: 719 - RECORDS_OUT_OPERATOR_FS_14: 4 - RECORDS_OUT_OPERATOR_GBY_11: 4 - RECORDS_OUT_OPERATOR_GBY_13: 4 - RECORDS_OUT_OPERATOR_MAP_0: 0 - RECORDS_OUT_OPERATOR_RS_12: 4 - RECORDS_OUT_OPERATOR_SEL_10: 719 - RECORDS_OUT_OPERATOR_TS_0: 1087 -Stage-1 INPUT COUNTERS: - GROUPED_INPUT_SPLITS_Map_1: 1 - INPUT_DIRECTORIES_Map_1: 2 - INPUT_FILES_Map_1: 2 - RAW_INPUT_SPLITS_Map_1: 2 -DEBUG 157 -ERROR 3 -INFO 537 -WARN 22 -PREHOOK: query: select severity,count(*) from logs2 where dt='2019-03-21' group by severity -PREHOOK: type: QUERY -PREHOOK: Input: default@logs2 -PREHOOK: Input: default@logs2@dt=2019-03-21/ns=foo/app=hs2 -#### A masked pattern was here #### -Stage-1 FILE SYSTEM COUNTERS: -Stage-1 HIVE COUNTERS: - CREATED_FILES: 2 - DESERIALIZE_ERRORS: 0 - RECORDS_IN_Map_1: 756 - RECORDS_OUT_0: 5 - RECORDS_OUT_INTERMEDIATE_Map_1: 5 - RECORDS_OUT_INTERMEDIATE_Reducer_2: 0 - RECORDS_OUT_OPERATOR_FS_13: 5 - RECORDS_OUT_OPERATOR_GBY_10: 5 - RECORDS_OUT_OPERATOR_GBY_12: 5 - RECORDS_OUT_OPERATOR_MAP_0: 0 - RECORDS_OUT_OPERATOR_RS_11: 5 - RECORDS_OUT_OPERATOR_SEL_9: 756 - RECORDS_OUT_OPERATOR_TS_0: 756 -Stage-1 INPUT COUNTERS: - GROUPED_INPUT_SPLITS_Map_1: 1 - INPUT_DIRECTORIES_Map_1: 1 - INPUT_FILES_Map_1: 1 - 
RAW_INPUT_SPLITS_Map_1: 1 -DEBUG 157 -ERROR 3 -INFO 537 -NULL 37 -WARN 22 -PREHOOK: query: select severity,count(*) from logs2 where ts between '2019-03-22 01:00:00.0' and '2019-03-22 01:08:00.0' group by severity -PREHOOK: type: QUERY -PREHOOK: Input: default@logs2 -PREHOOK: Input: default@logs2@dt=2019-03-21/ns=foo/app=hs2 -PREHOOK: Input: default@logs2@dt=2019-03-22/ns=bar/app=hs2 -#### A masked pattern was here #### -Stage-1 FILE SYSTEM COUNTERS: -Stage-1 HIVE COUNTERS: - DESERIALIZE_ERRORS: 0 - RECORDS_IN_Map_1: 331 - RECORDS_OUT_0: 0 - RECORDS_OUT_INTERMEDIATE_Map_1: 0 - RECORDS_OUT_INTERMEDIATE_Reducer_2: 0 - RECORDS_OUT_OPERATOR_FIL_9: 0 - RECORDS_OUT_OPERATOR_FS_14: 0 - RECORDS_OUT_OPERATOR_GBY_11: 0 - RECORDS_OUT_OPERATOR_GBY_13: 0 - RECORDS_OUT_OPERATOR_MAP_0: 0 - RECORDS_OUT_OPERATOR_RS_12: 0 - RECORDS_OUT_OPERATOR_SEL_10: 0 - RECORDS_OUT_OPERATOR_TS_0: 1087 -Stage-1 INPUT COUNTERS: - GROUPED_INPUT_SPLITS_Map_1: 1 - INPUT_DIRECTORIES_Map_1: 2 - INPUT_FILES_Map_1: 2 - RAW_INPUT_SPLITS_Map_1: 2 -PREHOOK: query: select severity,count(*) from logs2 where ts between '2019-03-22 01:08:00.0' and '2019-03-22 01:09:00.0' group by severity -PREHOOK: type: QUERY -PREHOOK: Input: default@logs2 -PREHOOK: Input: default@logs2@dt=2019-03-21/ns=foo/app=hs2 -PREHOOK: Input: default@logs2@dt=2019-03-22/ns=bar/app=hs2 -#### A masked pattern was here #### -Stage-1 FILE SYSTEM COUNTERS: -Stage-1 HIVE COUNTERS: - CREATED_FILES: 2 - DESERIALIZE_ERRORS: 0 - RECORDS_IN_Map_1: 331 - RECORDS_OUT_0: 2 - RECORDS_OUT_INTERMEDIATE_Map_1: 2 - RECORDS_OUT_INTERMEDIATE_Reducer_2: 0 - RECORDS_OUT_OPERATOR_FIL_9: 220 - RECORDS_OUT_OPERATOR_FS_14: 2 - RECORDS_OUT_OPERATOR_GBY_11: 2 - RECORDS_OUT_OPERATOR_GBY_13: 2 - RECORDS_OUT_OPERATOR_MAP_0: 0 - RECORDS_OUT_OPERATOR_RS_12: 2 - RECORDS_OUT_OPERATOR_SEL_10: 220 - RECORDS_OUT_OPERATOR_TS_0: 1087 -Stage-1 INPUT COUNTERS: - GROUPED_INPUT_SPLITS_Map_1: 1 - INPUT_DIRECTORIES_Map_1: 2 - INPUT_FILES_Map_1: 2 - RAW_INPUT_SPLITS_Map_1: 2 -DEBUG 31 -INFO 189 -PREHOOK: query: select severity,count(*) from logs2 where ts between '2019-03-22 01:09:00.0' and '2019-03-22 01:10:00.0' group by severity -PREHOOK: type: QUERY -PREHOOK: Input: default@logs2 -PREHOOK: Input: default@logs2@dt=2019-03-21/ns=foo/app=hs2 -PREHOOK: Input: default@logs2@dt=2019-03-22/ns=bar/app=hs2 -#### A masked pattern was here #### -Stage-1 FILE SYSTEM COUNTERS: -Stage-1 HIVE COUNTERS: - DESERIALIZE_ERRORS: 0 - RECORDS_IN_Map_1: 331 - RECORDS_OUT_0: 0 - RECORDS_OUT_INTERMEDIATE_Map_1: 0 - RECORDS_OUT_INTERMEDIATE_Reducer_2: 0 - RECORDS_OUT_OPERATOR_FIL_9: 0 - RECORDS_OUT_OPERATOR_FS_14: 0 - RECORDS_OUT_OPERATOR_GBY_11: 0 - RECORDS_OUT_OPERATOR_GBY_13: 0 - RECORDS_OUT_OPERATOR_MAP_0: 0 - RECORDS_OUT_OPERATOR_RS_12: 0 - RECORDS_OUT_OPERATOR_SEL_10: 0 - RECORDS_OUT_OPERATOR_TS_0: 1087 -Stage-1 INPUT COUNTERS: - GROUPED_INPUT_SPLITS_Map_1: 1 - INPUT_DIRECTORIES_Map_1: 2 - INPUT_FILES_Map_1: 2 - RAW_INPUT_SPLITS_Map_1: 2 -PREHOOK: query: select severity,count(*) from logs2 where ts between '2019-03-22 01:00:00.0' and '2019-03-22 02:00:00.0' group by severity -PREHOOK: type: QUERY -PREHOOK: Input: default@logs2 -PREHOOK: Input: default@logs2@dt=2019-03-21/ns=foo/app=hs2 -PREHOOK: Input: default@logs2@dt=2019-03-22/ns=bar/app=hs2 -#### A masked pattern was here #### -Stage-1 FILE SYSTEM COUNTERS: -Stage-1 HIVE COUNTERS: - CREATED_FILES: 2 - DESERIALIZE_ERRORS: 0 - RECORDS_IN_Map_1: 331 - RECORDS_OUT_0: 2 - RECORDS_OUT_INTERMEDIATE_Map_1: 2 - RECORDS_OUT_INTERMEDIATE_Reducer_2: 0 - RECORDS_OUT_OPERATOR_FIL_9: 
220 - RECORDS_OUT_OPERATOR_FS_14: 2 - RECORDS_OUT_OPERATOR_GBY_11: 2 - RECORDS_OUT_OPERATOR_GBY_13: 2 - RECORDS_OUT_OPERATOR_MAP_0: 0 - RECORDS_OUT_OPERATOR_RS_12: 2 - RECORDS_OUT_OPERATOR_SEL_10: 220 - RECORDS_OUT_OPERATOR_TS_0: 1087 -Stage-1 INPUT COUNTERS: - GROUPED_INPUT_SPLITS_Map_1: 1 - INPUT_DIRECTORIES_Map_1: 2 - INPUT_FILES_Map_1: 2 - RAW_INPUT_SPLITS_Map_1: 2 -DEBUG 31 -INFO 189 -PREHOOK: query: select severity,count(*) from logs2 where dt='2019-03-22' group by severity -PREHOOK: type: QUERY -PREHOOK: Input: default@logs2 -PREHOOK: Input: default@logs2@dt=2019-03-22/ns=bar/app=hs2 -#### A masked pattern was here #### -Stage-1 FILE SYSTEM COUNTERS: -Stage-1 HIVE COUNTERS: - CREATED_FILES: 2 - DESERIALIZE_ERRORS: 0 - RECORDS_IN_Map_1: 331 - RECORDS_OUT_0: 3 - RECORDS_OUT_INTERMEDIATE_Map_1: 3 - RECORDS_OUT_INTERMEDIATE_Reducer_2: 0 - RECORDS_OUT_OPERATOR_FS_13: 3 - RECORDS_OUT_OPERATOR_GBY_10: 3 - RECORDS_OUT_OPERATOR_GBY_12: 3 - RECORDS_OUT_OPERATOR_MAP_0: 0 - RECORDS_OUT_OPERATOR_RS_11: 3 - RECORDS_OUT_OPERATOR_SEL_9: 331 - RECORDS_OUT_OPERATOR_TS_0: 331 -Stage-1 INPUT COUNTERS: - GROUPED_INPUT_SPLITS_Map_1: 1 - INPUT_DIRECTORIES_Map_1: 1 - INPUT_FILES_Map_1: 1 - RAW_INPUT_SPLITS_Map_1: 1 -DEBUG 31 -INFO 189 -NULL 111 -PREHOOK: query: select severity,count(*) from logs2 where ts between '2019-03-21 07:00:00.0' and '2019-03-21 07:06:00.0' group by severity -PREHOOK: type: QUERY -PREHOOK: Input: default@logs2 -PREHOOK: Input: default@logs2@dt=2019-03-21/ns=foo/app=hs2 -PREHOOK: Input: default@logs2@dt=2019-03-22/ns=bar/app=hs2 -#### A masked pattern was here #### -Stage-1 FILE SYSTEM COUNTERS: -Stage-1 HIVE COUNTERS: - DESERIALIZE_ERRORS: 0 - RECORDS_IN_Map_1: 756 - RECORDS_OUT_0: 0 - RECORDS_OUT_INTERMEDIATE_Map_1: 0 - RECORDS_OUT_INTERMEDIATE_Reducer_2: 0 - RECORDS_OUT_OPERATOR_FIL_9: 0 - RECORDS_OUT_OPERATOR_FS_14: 0 - RECORDS_OUT_OPERATOR_GBY_11: 0 - RECORDS_OUT_OPERATOR_GBY_13: 0 - RECORDS_OUT_OPERATOR_MAP_0: 0 - RECORDS_OUT_OPERATOR_RS_12: 0 - RECORDS_OUT_OPERATOR_SEL_10: 0 - RECORDS_OUT_OPERATOR_TS_0: 756 -Stage-1 INPUT COUNTERS: - GROUPED_INPUT_SPLITS_Map_1: 1 - INPUT_DIRECTORIES_Map_1: 2 - INPUT_FILES_Map_1: 1 - RAW_INPUT_SPLITS_Map_1: 1 -PREHOOK: query: select severity,count(*) from logs2 where ts between '2019-03-21 07:06:00.0' and '2019-03-21 07:07:00.0' group by severity -PREHOOK: type: QUERY -PREHOOK: Input: default@logs2 -PREHOOK: Input: default@logs2@dt=2019-03-21/ns=foo/app=hs2 -PREHOOK: Input: default@logs2@dt=2019-03-22/ns=bar/app=hs2 -#### A masked pattern was here #### -Stage-1 FILE SYSTEM COUNTERS: -Stage-1 HIVE COUNTERS: - CREATED_FILES: 2 - DESERIALIZE_ERRORS: 0 - RECORDS_IN_Map_1: 756 - RECORDS_OUT_0: 3 - RECORDS_OUT_INTERMEDIATE_Map_1: 3 - RECORDS_OUT_INTERMEDIATE_Reducer_2: 0 - RECORDS_OUT_OPERATOR_FIL_9: 445 - RECORDS_OUT_OPERATOR_FS_14: 3 - RECORDS_OUT_OPERATOR_GBY_11: 3 - RECORDS_OUT_OPERATOR_GBY_13: 3 - RECORDS_OUT_OPERATOR_MAP_0: 0 - RECORDS_OUT_OPERATOR_RS_12: 3 - RECORDS_OUT_OPERATOR_SEL_10: 445 - RECORDS_OUT_OPERATOR_TS_0: 756 -Stage-1 INPUT COUNTERS: - GROUPED_INPUT_SPLITS_Map_1: 1 - INPUT_DIRECTORIES_Map_1: 2 - INPUT_FILES_Map_1: 1 - RAW_INPUT_SPLITS_Map_1: 1 -DEBUG 50 -INFO 376 -WARN 19 -PREHOOK: query: select severity,count(*) from logs2 where ts between '2019-03-21 07:07:00.0' and '2019-03-21 07:08:00.0' group by severity -PREHOOK: type: QUERY -PREHOOK: Input: default@logs2 -PREHOOK: Input: default@logs2@dt=2019-03-21/ns=foo/app=hs2 -PREHOOK: Input: default@logs2@dt=2019-03-22/ns=bar/app=hs2 -#### A masked pattern was here #### -Stage-1 FILE 
SYSTEM COUNTERS: -Stage-1 HIVE COUNTERS: - CREATED_FILES: 2 - DESERIALIZE_ERRORS: 0 - RECORDS_IN_Map_1: 756 - RECORDS_OUT_0: 4 - RECORDS_OUT_INTERMEDIATE_Map_1: 4 - RECORDS_OUT_INTERMEDIATE_Reducer_2: 0 - RECORDS_OUT_OPERATOR_FIL_9: 274 - RECORDS_OUT_OPERATOR_FS_14: 4 - RECORDS_OUT_OPERATOR_GBY_11: 4 - RECORDS_OUT_OPERATOR_GBY_13: 4 - RECORDS_OUT_OPERATOR_MAP_0: 0 - RECORDS_OUT_OPERATOR_RS_12: 4 - RECORDS_OUT_OPERATOR_SEL_10: 274 - RECORDS_OUT_OPERATOR_TS_0: 756 -Stage-1 INPUT COUNTERS: - GROUPED_INPUT_SPLITS_Map_1: 1 - INPUT_DIRECTORIES_Map_1: 2 - INPUT_FILES_Map_1: 1 - RAW_INPUT_SPLITS_Map_1: 1 -DEBUG 107 -ERROR 3 -INFO 161 -WARN 3 -PREHOOK: query: select severity,count(*) from logs2 where ts between '2019-03-21 07:08:00.0' and '2019-03-21 08:08:00.0' group by severity -PREHOOK: type: QUERY -PREHOOK: Input: default@logs2 -PREHOOK: Input: default@logs2@dt=2019-03-21/ns=foo/app=hs2 -PREHOOK: Input: default@logs2@dt=2019-03-22/ns=bar/app=hs2 -#### A masked pattern was here #### -Stage-1 FILE SYSTEM COUNTERS: -Stage-1 HIVE COUNTERS: - DESERIALIZE_ERRORS: 0 - RECORDS_IN_Map_1: 756 - RECORDS_OUT_0: 0 - RECORDS_OUT_INTERMEDIATE_Map_1: 0 - RECORDS_OUT_INTERMEDIATE_Reducer_2: 0 - RECORDS_OUT_OPERATOR_FIL_9: 0 - RECORDS_OUT_OPERATOR_FS_14: 0 - RECORDS_OUT_OPERATOR_GBY_11: 0 - RECORDS_OUT_OPERATOR_GBY_13: 0 - RECORDS_OUT_OPERATOR_MAP_0: 0 - RECORDS_OUT_OPERATOR_RS_12: 0 - RECORDS_OUT_OPERATOR_SEL_10: 0 - RECORDS_OUT_OPERATOR_TS_0: 756 -Stage-1 INPUT COUNTERS: - GROUPED_INPUT_SPLITS_Map_1: 1 - INPUT_DIRECTORIES_Map_1: 2 - INPUT_FILES_Map_1: 1 - RAW_INPUT_SPLITS_Map_1: 1 -PREHOOK: query: select severity,count(*) from logs2 where ts between '2019-03-21 07:00:00.0' and '2019-03-21 08:00:00.0' group by severity -PREHOOK: type: QUERY -PREHOOK: Input: default@logs2 -PREHOOK: Input: default@logs2@dt=2019-03-21/ns=foo/app=hs2 -PREHOOK: Input: default@logs2@dt=2019-03-22/ns=bar/app=hs2 -#### A masked pattern was here #### -Stage-1 FILE SYSTEM COUNTERS: -Stage-1 HIVE COUNTERS: - CREATED_FILES: 2 - DESERIALIZE_ERRORS: 0 - RECORDS_IN_Map_1: 756 - RECORDS_OUT_0: 4 - RECORDS_OUT_INTERMEDIATE_Map_1: 4 - RECORDS_OUT_INTERMEDIATE_Reducer_2: 0 - RECORDS_OUT_OPERATOR_FIL_9: 719 - RECORDS_OUT_OPERATOR_FS_14: 4 - RECORDS_OUT_OPERATOR_GBY_11: 4 - RECORDS_OUT_OPERATOR_GBY_13: 4 - RECORDS_OUT_OPERATOR_MAP_0: 0 - RECORDS_OUT_OPERATOR_RS_12: 4 - RECORDS_OUT_OPERATOR_SEL_10: 719 - RECORDS_OUT_OPERATOR_TS_0: 756 -Stage-1 INPUT COUNTERS: - GROUPED_INPUT_SPLITS_Map_1: 1 - INPUT_DIRECTORIES_Map_1: 2 - INPUT_FILES_Map_1: 1 - RAW_INPUT_SPLITS_Map_1: 1 -DEBUG 157 -ERROR 3 -INFO 537 -WARN 22 -PREHOOK: query: select severity,count(*) from logs2 where dt='2019-03-21' group by severity -PREHOOK: type: QUERY -PREHOOK: Input: default@logs2 -PREHOOK: Input: default@logs2@dt=2019-03-21/ns=foo/app=hs2 -#### A masked pattern was here #### -Stage-1 FILE SYSTEM COUNTERS: -Stage-1 HIVE COUNTERS: - CREATED_FILES: 2 - DESERIALIZE_ERRORS: 0 - RECORDS_IN_Map_1: 756 - RECORDS_OUT_0: 5 - RECORDS_OUT_INTERMEDIATE_Map_1: 5 - RECORDS_OUT_INTERMEDIATE_Reducer_2: 0 - RECORDS_OUT_OPERATOR_FS_13: 5 - RECORDS_OUT_OPERATOR_GBY_10: 5 - RECORDS_OUT_OPERATOR_GBY_12: 5 - RECORDS_OUT_OPERATOR_MAP_0: 0 - RECORDS_OUT_OPERATOR_RS_11: 5 - RECORDS_OUT_OPERATOR_SEL_9: 756 - RECORDS_OUT_OPERATOR_TS_0: 756 -Stage-1 INPUT COUNTERS: - GROUPED_INPUT_SPLITS_Map_1: 1 - INPUT_DIRECTORIES_Map_1: 1 - INPUT_FILES_Map_1: 1 - RAW_INPUT_SPLITS_Map_1: 1 -DEBUG 157 -ERROR 3 -INFO 537 -NULL 37 -WARN 22 -PREHOOK: query: select severity,count(*) from logs2 where ts between '2019-03-22 
01:00:00.0' and '2019-03-22 01:08:00.0' group by severity -PREHOOK: type: QUERY -PREHOOK: Input: default@logs2 -PREHOOK: Input: default@logs2@dt=2019-03-21/ns=foo/app=hs2 -PREHOOK: Input: default@logs2@dt=2019-03-22/ns=bar/app=hs2 -#### A masked pattern was here #### -Stage-1 FILE SYSTEM COUNTERS: -Stage-1 HIVE COUNTERS: - DESERIALIZE_ERRORS: 0 - RECORDS_IN_Map_1: 331 - RECORDS_OUT_0: 0 - RECORDS_OUT_INTERMEDIATE_Map_1: 0 - RECORDS_OUT_INTERMEDIATE_Reducer_2: 0 - RECORDS_OUT_OPERATOR_FIL_9: 0 - RECORDS_OUT_OPERATOR_FS_14: 0 - RECORDS_OUT_OPERATOR_GBY_11: 0 - RECORDS_OUT_OPERATOR_GBY_13: 0 - RECORDS_OUT_OPERATOR_MAP_0: 0 - RECORDS_OUT_OPERATOR_RS_12: 0 - RECORDS_OUT_OPERATOR_SEL_10: 0 - RECORDS_OUT_OPERATOR_TS_0: 331 -Stage-1 INPUT COUNTERS: - GROUPED_INPUT_SPLITS_Map_1: 1 - INPUT_DIRECTORIES_Map_1: 2 - INPUT_FILES_Map_1: 1 - RAW_INPUT_SPLITS_Map_1: 1 -PREHOOK: query: select severity,count(*) from logs2 where ts between '2019-03-22 01:08:00.0' and '2019-03-22 01:09:00.0' group by severity -PREHOOK: type: QUERY -PREHOOK: Input: default@logs2 -PREHOOK: Input: default@logs2@dt=2019-03-21/ns=foo/app=hs2 -PREHOOK: Input: default@logs2@dt=2019-03-22/ns=bar/app=hs2 -#### A masked pattern was here #### -Stage-1 FILE SYSTEM COUNTERS: -Stage-1 HIVE COUNTERS: - CREATED_FILES: 2 - DESERIALIZE_ERRORS: 0 - RECORDS_IN_Map_1: 331 - RECORDS_OUT_0: 2 - RECORDS_OUT_INTERMEDIATE_Map_1: 2 - RECORDS_OUT_INTERMEDIATE_Reducer_2: 0 - RECORDS_OUT_OPERATOR_FIL_9: 220 - RECORDS_OUT_OPERATOR_FS_14: 2 - RECORDS_OUT_OPERATOR_GBY_11: 2 - RECORDS_OUT_OPERATOR_GBY_13: 2 - RECORDS_OUT_OPERATOR_MAP_0: 0 - RECORDS_OUT_OPERATOR_RS_12: 2 - RECORDS_OUT_OPERATOR_SEL_10: 220 - RECORDS_OUT_OPERATOR_TS_0: 331 -Stage-1 INPUT COUNTERS: - GROUPED_INPUT_SPLITS_Map_1: 1 - INPUT_DIRECTORIES_Map_1: 2 - INPUT_FILES_Map_1: 1 - RAW_INPUT_SPLITS_Map_1: 1 -DEBUG 31 -INFO 189 -PREHOOK: query: select severity,count(*) from logs2 where ts between '2019-03-22 01:09:00.0' and '2019-03-22 01:10:00.0' group by severity -PREHOOK: type: QUERY -PREHOOK: Input: default@logs2 -PREHOOK: Input: default@logs2@dt=2019-03-21/ns=foo/app=hs2 -PREHOOK: Input: default@logs2@dt=2019-03-22/ns=bar/app=hs2 -#### A masked pattern was here #### -Stage-1 FILE SYSTEM COUNTERS: -Stage-1 HIVE COUNTERS: - DESERIALIZE_ERRORS: 0 - RECORDS_IN_Map_1: 331 - RECORDS_OUT_0: 0 - RECORDS_OUT_INTERMEDIATE_Map_1: 0 - RECORDS_OUT_INTERMEDIATE_Reducer_2: 0 - RECORDS_OUT_OPERATOR_FIL_9: 0 - RECORDS_OUT_OPERATOR_FS_14: 0 - RECORDS_OUT_OPERATOR_GBY_11: 0 - RECORDS_OUT_OPERATOR_GBY_13: 0 - RECORDS_OUT_OPERATOR_MAP_0: 0 - RECORDS_OUT_OPERATOR_RS_12: 0 - RECORDS_OUT_OPERATOR_SEL_10: 0 - RECORDS_OUT_OPERATOR_TS_0: 331 -Stage-1 INPUT COUNTERS: - GROUPED_INPUT_SPLITS_Map_1: 1 - INPUT_DIRECTORIES_Map_1: 2 - INPUT_FILES_Map_1: 1 - RAW_INPUT_SPLITS_Map_1: 1 -PREHOOK: query: select severity,count(*) from logs2 where ts between '2019-03-22 01:00:00.0' and '2019-03-22 02:00:00.0' group by severity -PREHOOK: type: QUERY -PREHOOK: Input: default@logs2 -PREHOOK: Input: default@logs2@dt=2019-03-21/ns=foo/app=hs2 -PREHOOK: Input: default@logs2@dt=2019-03-22/ns=bar/app=hs2 -#### A masked pattern was here #### -Stage-1 FILE SYSTEM COUNTERS: -Stage-1 HIVE COUNTERS: - CREATED_FILES: 2 - DESERIALIZE_ERRORS: 0 - RECORDS_IN_Map_1: 331 - RECORDS_OUT_0: 2 - RECORDS_OUT_INTERMEDIATE_Map_1: 2 - RECORDS_OUT_INTERMEDIATE_Reducer_2: 0 - RECORDS_OUT_OPERATOR_FIL_9: 220 - RECORDS_OUT_OPERATOR_FS_14: 2 - RECORDS_OUT_OPERATOR_GBY_11: 2 - RECORDS_OUT_OPERATOR_GBY_13: 2 - RECORDS_OUT_OPERATOR_MAP_0: 0 - 
RECORDS_OUT_OPERATOR_RS_12: 2 - RECORDS_OUT_OPERATOR_SEL_10: 220 - RECORDS_OUT_OPERATOR_TS_0: 331 -Stage-1 INPUT COUNTERS: - GROUPED_INPUT_SPLITS_Map_1: 1 - INPUT_DIRECTORIES_Map_1: 2 - INPUT_FILES_Map_1: 1 - RAW_INPUT_SPLITS_Map_1: 1 -DEBUG 31 -INFO 189 -PREHOOK: query: select severity,count(*) from logs2 where dt='2019-03-22' group by severity -PREHOOK: type: QUERY -PREHOOK: Input: default@logs2 -PREHOOK: Input: default@logs2@dt=2019-03-22/ns=bar/app=hs2 -#### A masked pattern was here #### -Stage-1 FILE SYSTEM COUNTERS: -Stage-1 HIVE COUNTERS: - CREATED_FILES: 2 - DESERIALIZE_ERRORS: 0 - RECORDS_IN_Map_1: 331 - RECORDS_OUT_0: 3 - RECORDS_OUT_INTERMEDIATE_Map_1: 3 - RECORDS_OUT_INTERMEDIATE_Reducer_2: 0 - RECORDS_OUT_OPERATOR_FS_13: 3 - RECORDS_OUT_OPERATOR_GBY_10: 3 - RECORDS_OUT_OPERATOR_GBY_12: 3 - RECORDS_OUT_OPERATOR_MAP_0: 0 - RECORDS_OUT_OPERATOR_RS_11: 3 - RECORDS_OUT_OPERATOR_SEL_9: 331 - RECORDS_OUT_OPERATOR_TS_0: 331 -Stage-1 INPUT COUNTERS: - GROUPED_INPUT_SPLITS_Map_1: 1 - INPUT_DIRECTORIES_Map_1: 1 - INPUT_FILES_Map_1: 1 - RAW_INPUT_SPLITS_Map_1: 1 -DEBUG 31 -INFO 189 -NULL 111 -PREHOOK: query: drop table logs2 -PREHOOK: type: DROPTABLE -PREHOOK: Input: default@logs2 -PREHOOK: Output: default@logs2 diff --git ql/src/test/results/clientpositive/llap/schema_evol_orc_acidvec_part.q.out ql/src/test/results/clientpositive/llap/schema_evol_orc_acidvec_part.q.out deleted file mode 100644 index 7cc4262a0c..0000000000 --- ql/src/test/results/clientpositive/llap/schema_evol_orc_acidvec_part.q.out +++ /dev/null @@ -1,1797 +0,0 @@ -PREHOOK: query: CREATE TABLE schema_evolution_data(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string) -row format delimited fields terminated by '|' stored as textfile -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@schema_evolution_data -POSTHOOK: query: CREATE TABLE schema_evolution_data(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string) -row format delimited fields terminated by '|' stored as textfile -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@schema_evolution_data -PREHOOK: query: load data local inpath '../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into table schema_evolution_data -PREHOOK: type: LOAD -#### A masked pattern was here #### -PREHOOK: Output: default@schema_evolution_data -POSTHOOK: query: load data local inpath '../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into table schema_evolution_data -POSTHOOK: type: LOAD -#### A masked pattern was here #### -POSTHOOK: Output: default@schema_evolution_data -PREHOOK: query: CREATE TABLE part_add_int_permute_select(insert_num int, a INT, b STRING) PARTITIONED BY(part INT) clustered by (a) into 2 buckets STORED AS ORC 
TBLPROPERTIES ('transactional'='true') -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@part_add_int_permute_select -POSTHOOK: query: CREATE TABLE part_add_int_permute_select(insert_num int, a INT, b STRING) PARTITIONED BY(part INT) clustered by (a) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true') -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@part_add_int_permute_select -PREHOOK: query: insert into table part_add_int_permute_select partition(part=1) VALUES (1, 1111, 'new') -PREHOOK: type: QUERY -PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@part_add_int_permute_select@part=1 -POSTHOOK: query: insert into table part_add_int_permute_select partition(part=1) VALUES (1, 1111, 'new') -POSTHOOK: type: QUERY -POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@part_add_int_permute_select@part=1 -POSTHOOK: Lineage: part_add_int_permute_select PARTITION(part=1).a SCRIPT [] -POSTHOOK: Lineage: part_add_int_permute_select PARTITION(part=1).b SCRIPT [] -POSTHOOK: Lineage: part_add_int_permute_select PARTITION(part=1).insert_num SCRIPT [] -col1 col2 col3 -PREHOOK: query: alter table part_add_int_permute_select add columns(c int) -PREHOOK: type: ALTERTABLE_ADDCOLS -PREHOOK: Input: default@part_add_int_permute_select -PREHOOK: Output: default@part_add_int_permute_select -POSTHOOK: query: alter table part_add_int_permute_select add columns(c int) -POSTHOOK: type: ALTERTABLE_ADDCOLS -POSTHOOK: Input: default@part_add_int_permute_select -POSTHOOK: Output: default@part_add_int_permute_select -PREHOOK: query: insert into table part_add_int_permute_select partition(part=1) VALUES (2, 2222, 'new', 3333) -PREHOOK: type: QUERY -PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@part_add_int_permute_select@part=1 -POSTHOOK: query: insert into table part_add_int_permute_select partition(part=1) VALUES (2, 2222, 'new', 3333) -POSTHOOK: type: QUERY -POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@part_add_int_permute_select@part=1 -POSTHOOK: Lineage: part_add_int_permute_select PARTITION(part=1).a SCRIPT [] -POSTHOOK: Lineage: part_add_int_permute_select PARTITION(part=1).b SCRIPT [] -POSTHOOK: Lineage: part_add_int_permute_select PARTITION(part=1).c SCRIPT [] -POSTHOOK: Lineage: part_add_int_permute_select PARTITION(part=1).insert_num SCRIPT [] -col1 col2 col3 col4 -PREHOOK: query: explain vectorization only detail -select insert_num,part,a,b,c from part_add_int_permute_select -PREHOOK: type: QUERY -POSTHOOK: query: explain vectorization only detail -select insert_num,part,a,b,c from part_add_int_permute_select -POSTHOOK: type: QUERY -Explain -PLAN VECTORIZATION: - enabled: true - enabledConditionsMet: [hive.vectorized.execution.enabled IS true] - -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-1 - Tez - Vertices: - Map 1 - Map Operator Tree: - TableScan Vectorization: - native: true - vectorizationSchemaColumns: [0:insert_num:int, 1:a:int, 2:b:string, 3:c:int, 4:part:int, 5:ROW__ID:struct] - Select Vectorization: - className: VectorSelectOperator - native: true - projectedOutputColumnNums: [0, 4, 1, 2, 3] - File Sink Vectorization: - className: VectorFileSinkOperator - native: false - Execution mode: vectorized, llap - Map Vectorization: - enabled: true - enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true - inputFormatFeatureSupport: 
[] - featureSupportInUse: [] - inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat - allNative: false - usesVectorUDFAdaptor: false - vectorized: true - rowBatchContext: - dataColumnCount: 4 - includeColumns: [0, 1, 2, 3] - dataColumns: insert_num:int, a:int, b:string, c:int - partitionColumnCount: 1 - partitionColumns: part:int - scratchColumnTypeNames: [] - - Stage: Stage-0 - Fetch Operator - -PREHOOK: query: select insert_num,part,a,b from part_add_int_permute_select -PREHOOK: type: QUERY -PREHOOK: Input: default@part_add_int_permute_select -PREHOOK: Input: default@part_add_int_permute_select@part=1 -#### A masked pattern was here #### -POSTHOOK: query: select insert_num,part,a,b from part_add_int_permute_select -POSTHOOK: type: QUERY -POSTHOOK: Input: default@part_add_int_permute_select -POSTHOOK: Input: default@part_add_int_permute_select@part=1 -#### A masked pattern was here #### -insert_num part a b -1 1 1111 new -2 1 2222 new -PREHOOK: query: select insert_num,part,a,b,c from part_add_int_permute_select -PREHOOK: type: QUERY -PREHOOK: Input: default@part_add_int_permute_select -PREHOOK: Input: default@part_add_int_permute_select@part=1 -#### A masked pattern was here #### -POSTHOOK: query: select insert_num,part,a,b,c from part_add_int_permute_select -POSTHOOK: type: QUERY -POSTHOOK: Input: default@part_add_int_permute_select -POSTHOOK: Input: default@part_add_int_permute_select@part=1 -#### A masked pattern was here #### -insert_num part a b c -1 1 1111 new NULL -2 1 2222 new 3333 -PREHOOK: query: select insert_num,part,c from part_add_int_permute_select -PREHOOK: type: QUERY -PREHOOK: Input: default@part_add_int_permute_select -PREHOOK: Input: default@part_add_int_permute_select@part=1 -#### A masked pattern was here #### -POSTHOOK: query: select insert_num,part,c from part_add_int_permute_select -POSTHOOK: type: QUERY -POSTHOOK: Input: default@part_add_int_permute_select -POSTHOOK: Input: default@part_add_int_permute_select@part=1 -#### A masked pattern was here #### -insert_num part c -1 1 NULL -2 1 3333 -PREHOOK: query: drop table part_add_int_permute_select -PREHOOK: type: DROPTABLE -PREHOOK: Input: default@part_add_int_permute_select -PREHOOK: Output: default@part_add_int_permute_select -POSTHOOK: query: drop table part_add_int_permute_select -POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@part_add_int_permute_select -POSTHOOK: Output: default@part_add_int_permute_select -PREHOOK: query: CREATE TABLE part_add_int_string_permute_select(insert_num int, a INT, b STRING) PARTITIONED BY(part INT) clustered by (a) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true') -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@part_add_int_string_permute_select -POSTHOOK: query: CREATE TABLE part_add_int_string_permute_select(insert_num int, a INT, b STRING) PARTITIONED BY(part INT) clustered by (a) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true') -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@part_add_int_string_permute_select -PREHOOK: query: insert into table part_add_int_string_permute_select partition(part=1) VALUES (1, 1111, 'new') -PREHOOK: type: QUERY -PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@part_add_int_string_permute_select@part=1 -POSTHOOK: query: insert into table part_add_int_string_permute_select partition(part=1) VALUES (1, 1111, 'new') -POSTHOOK: type: QUERY -POSTHOOK: Input: _dummy_database@_dummy_table 
-POSTHOOK: Output: default@part_add_int_string_permute_select@part=1 -POSTHOOK: Lineage: part_add_int_string_permute_select PARTITION(part=1).a SCRIPT [] -POSTHOOK: Lineage: part_add_int_string_permute_select PARTITION(part=1).b SCRIPT [] -POSTHOOK: Lineage: part_add_int_string_permute_select PARTITION(part=1).insert_num SCRIPT [] -col1 col2 col3 -PREHOOK: query: alter table part_add_int_string_permute_select add columns(c int, d string) -PREHOOK: type: ALTERTABLE_ADDCOLS -PREHOOK: Input: default@part_add_int_string_permute_select -PREHOOK: Output: default@part_add_int_string_permute_select -POSTHOOK: query: alter table part_add_int_string_permute_select add columns(c int, d string) -POSTHOOK: type: ALTERTABLE_ADDCOLS -POSTHOOK: Input: default@part_add_int_string_permute_select -POSTHOOK: Output: default@part_add_int_string_permute_select -PREHOOK: query: insert into table part_add_int_string_permute_select partition(part=1) VALUES (2, 2222, 'new', 3333, '4444') -PREHOOK: type: QUERY -PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@part_add_int_string_permute_select@part=1 -POSTHOOK: query: insert into table part_add_int_string_permute_select partition(part=1) VALUES (2, 2222, 'new', 3333, '4444') -POSTHOOK: type: QUERY -POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@part_add_int_string_permute_select@part=1 -POSTHOOK: Lineage: part_add_int_string_permute_select PARTITION(part=1).a SCRIPT [] -POSTHOOK: Lineage: part_add_int_string_permute_select PARTITION(part=1).b SCRIPT [] -POSTHOOK: Lineage: part_add_int_string_permute_select PARTITION(part=1).c SCRIPT [] -POSTHOOK: Lineage: part_add_int_string_permute_select PARTITION(part=1).d SCRIPT [] -POSTHOOK: Lineage: part_add_int_string_permute_select PARTITION(part=1).insert_num SCRIPT [] -col1 col2 col3 col4 col5 -PREHOOK: query: explain vectorization only detail -select insert_num,part,a,b,c,d from part_add_int_string_permute_select -PREHOOK: type: QUERY -POSTHOOK: query: explain vectorization only detail -select insert_num,part,a,b,c,d from part_add_int_string_permute_select -POSTHOOK: type: QUERY -Explain -PLAN VECTORIZATION: - enabled: true - enabledConditionsMet: [hive.vectorized.execution.enabled IS true] - -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-1 - Tez - Vertices: - Map 1 - Map Operator Tree: - TableScan Vectorization: - native: true - vectorizationSchemaColumns: [0:insert_num:int, 1:a:int, 2:b:string, 3:c:int, 4:d:string, 5:part:int, 6:ROW__ID:struct] - Select Vectorization: - className: VectorSelectOperator - native: true - projectedOutputColumnNums: [0, 5, 1, 2, 3, 4] - File Sink Vectorization: - className: VectorFileSinkOperator - native: false - Execution mode: vectorized, llap - Map Vectorization: - enabled: true - enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true - inputFormatFeatureSupport: [] - featureSupportInUse: [] - inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat - allNative: false - usesVectorUDFAdaptor: false - vectorized: true - rowBatchContext: - dataColumnCount: 5 - includeColumns: [0, 1, 2, 3, 4] - dataColumns: insert_num:int, a:int, b:string, c:int, d:string - partitionColumnCount: 1 - partitionColumns: part:int - scratchColumnTypeNames: [] - - Stage: Stage-0 - Fetch Operator - -PREHOOK: query: select insert_num,part,a,b from part_add_int_string_permute_select -PREHOOK: type: QUERY -PREHOOK: Input: default@part_add_int_string_permute_select -PREHOOK: 
Input: default@part_add_int_string_permute_select@part=1 -#### A masked pattern was here #### -POSTHOOK: query: select insert_num,part,a,b from part_add_int_string_permute_select -POSTHOOK: type: QUERY -POSTHOOK: Input: default@part_add_int_string_permute_select -POSTHOOK: Input: default@part_add_int_string_permute_select@part=1 -#### A masked pattern was here #### -insert_num part a b -1 1 1111 new -2 1 2222 new -PREHOOK: query: select insert_num,part,a,b,c from part_add_int_string_permute_select -PREHOOK: type: QUERY -PREHOOK: Input: default@part_add_int_string_permute_select -PREHOOK: Input: default@part_add_int_string_permute_select@part=1 -#### A masked pattern was here #### -POSTHOOK: query: select insert_num,part,a,b,c from part_add_int_string_permute_select -POSTHOOK: type: QUERY -POSTHOOK: Input: default@part_add_int_string_permute_select -POSTHOOK: Input: default@part_add_int_string_permute_select@part=1 -#### A masked pattern was here #### -insert_num part a b c -1 1 1111 new NULL -2 1 2222 new 3333 -PREHOOK: query: select insert_num,part,a,b,c,d from part_add_int_string_permute_select -PREHOOK: type: QUERY -PREHOOK: Input: default@part_add_int_string_permute_select -PREHOOK: Input: default@part_add_int_string_permute_select@part=1 -#### A masked pattern was here #### -POSTHOOK: query: select insert_num,part,a,b,c,d from part_add_int_string_permute_select -POSTHOOK: type: QUERY -POSTHOOK: Input: default@part_add_int_string_permute_select -POSTHOOK: Input: default@part_add_int_string_permute_select@part=1 -#### A masked pattern was here #### -insert_num part a b c d -1 1 1111 new NULL NULL -2 1 2222 new 3333 4444 -PREHOOK: query: select insert_num,part,a,c,d from part_add_int_string_permute_select -PREHOOK: type: QUERY -PREHOOK: Input: default@part_add_int_string_permute_select -PREHOOK: Input: default@part_add_int_string_permute_select@part=1 -#### A masked pattern was here #### -POSTHOOK: query: select insert_num,part,a,c,d from part_add_int_string_permute_select -POSTHOOK: type: QUERY -POSTHOOK: Input: default@part_add_int_string_permute_select -POSTHOOK: Input: default@part_add_int_string_permute_select@part=1 -#### A masked pattern was here #### -insert_num part a c d -1 1 1111 NULL NULL -2 1 2222 3333 4444 -PREHOOK: query: select insert_num,part,a,d from part_add_int_string_permute_select -PREHOOK: type: QUERY -PREHOOK: Input: default@part_add_int_string_permute_select -PREHOOK: Input: default@part_add_int_string_permute_select@part=1 -#### A masked pattern was here #### -POSTHOOK: query: select insert_num,part,a,d from part_add_int_string_permute_select -POSTHOOK: type: QUERY -POSTHOOK: Input: default@part_add_int_string_permute_select -POSTHOOK: Input: default@part_add_int_string_permute_select@part=1 -#### A masked pattern was here #### -insert_num part a d -1 1 1111 NULL -2 1 2222 4444 -PREHOOK: query: select insert_num,part,c from part_add_int_string_permute_select -PREHOOK: type: QUERY -PREHOOK: Input: default@part_add_int_string_permute_select -PREHOOK: Input: default@part_add_int_string_permute_select@part=1 -#### A masked pattern was here #### -POSTHOOK: query: select insert_num,part,c from part_add_int_string_permute_select -POSTHOOK: type: QUERY -POSTHOOK: Input: default@part_add_int_string_permute_select -POSTHOOK: Input: default@part_add_int_string_permute_select@part=1 -#### A masked pattern was here #### -insert_num part c -1 1 NULL -2 1 3333 -PREHOOK: query: select insert_num,part,d from part_add_int_string_permute_select -PREHOOK: type: QUERY -PREHOOK: 
Input: default@part_add_int_string_permute_select -PREHOOK: Input: default@part_add_int_string_permute_select@part=1 -#### A masked pattern was here #### -POSTHOOK: query: select insert_num,part,d from part_add_int_string_permute_select -POSTHOOK: type: QUERY -POSTHOOK: Input: default@part_add_int_string_permute_select -POSTHOOK: Input: default@part_add_int_string_permute_select@part=1 -#### A masked pattern was here #### -insert_num part d -1 1 NULL -2 1 4444 -PREHOOK: query: drop table part_add_int_string_permute_select -PREHOOK: type: DROPTABLE -PREHOOK: Input: default@part_add_int_string_permute_select -PREHOOK: Output: default@part_add_int_string_permute_select -POSTHOOK: query: drop table part_add_int_string_permute_select -POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@part_add_int_string_permute_select -POSTHOOK: Output: default@part_add_int_string_permute_select -PREHOOK: query: CREATE TABLE part_change_string_group_double(insert_num int, c1 STRING, c2 CHAR(50), c3 VARCHAR(50), b STRING) PARTITIONED BY(part INT) clustered by (c1) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true') -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@part_change_string_group_double -POSTHOOK: query: CREATE TABLE part_change_string_group_double(insert_num int, c1 STRING, c2 CHAR(50), c3 VARCHAR(50), b STRING) PARTITIONED BY(part INT) clustered by (c1) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true') -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@part_change_string_group_double -PREHOOK: query: insert into table part_change_string_group_double partition(part=1) SELECT insert_num, double_str, double_str, double_str, 'original' FROM schema_evolution_data -PREHOOK: type: QUERY -PREHOOK: Input: default@schema_evolution_data -PREHOOK: Output: default@part_change_string_group_double@part=1 -POSTHOOK: query: insert into table part_change_string_group_double partition(part=1) SELECT insert_num, double_str, double_str, double_str, 'original' FROM schema_evolution_data -POSTHOOK: type: QUERY -POSTHOOK: Input: default@schema_evolution_data -POSTHOOK: Output: default@part_change_string_group_double@part=1 -POSTHOOK: Lineage: part_change_string_group_double PARTITION(part=1).b SIMPLE [] -POSTHOOK: Lineage: part_change_string_group_double PARTITION(part=1).c1 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:double_str, type:string, comment:null), ] -POSTHOOK: Lineage: part_change_string_group_double PARTITION(part=1).c2 EXPRESSION [(schema_evolution_data)schema_evolution_data.FieldSchema(name:double_str, type:string, comment:null), ] -POSTHOOK: Lineage: part_change_string_group_double PARTITION(part=1).c3 EXPRESSION [(schema_evolution_data)schema_evolution_data.FieldSchema(name:double_str, type:string, comment:null), ] -POSTHOOK: Lineage: part_change_string_group_double PARTITION(part=1).insert_num SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:insert_num, type:int, comment:null), ] -_col0 _col1 _col2 _col3 _col4 -PREHOOK: query: alter table part_change_string_group_double replace columns (insert_num int, c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, b STRING) -PREHOOK: type: ALTERTABLE_REPLACECOLS -PREHOOK: Input: default@part_change_string_group_double -PREHOOK: Output: default@part_change_string_group_double -POSTHOOK: query: alter table part_change_string_group_double replace columns (insert_num int, c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, b STRING) -POSTHOOK: type: 
ALTERTABLE_REPLACECOLS -POSTHOOK: Input: default@part_change_string_group_double -POSTHOOK: Output: default@part_change_string_group_double -PREHOOK: query: insert into table part_change_string_group_double partition(part=1) SELECT insert_num, double1, double1, double1, 'new' FROM schema_evolution_data WHERE insert_num = 111 -PREHOOK: type: QUERY -PREHOOK: Input: default@schema_evolution_data -PREHOOK: Output: default@part_change_string_group_double@part=1 -POSTHOOK: query: insert into table part_change_string_group_double partition(part=1) SELECT insert_num, double1, double1, double1, 'new' FROM schema_evolution_data WHERE insert_num = 111 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@schema_evolution_data -POSTHOOK: Output: default@part_change_string_group_double@part=1 -POSTHOOK: Lineage: part_change_string_group_double PARTITION(part=1).b SIMPLE [] -POSTHOOK: Lineage: part_change_string_group_double PARTITION(part=1).c1 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:double1, type:double, comment:null), ] -POSTHOOK: Lineage: part_change_string_group_double PARTITION(part=1).c2 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:double1, type:double, comment:null), ] -POSTHOOK: Lineage: part_change_string_group_double PARTITION(part=1).c3 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:double1, type:double, comment:null), ] -POSTHOOK: Lineage: part_change_string_group_double PARTITION(part=1).insert_num SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:insert_num, type:int, comment:null), ] -insert_num double1 double1 double1 _c4 -PREHOOK: query: explain vectorization only detail -select insert_num,part,c1,c2,c3,b from part_change_string_group_double -PREHOOK: type: QUERY -POSTHOOK: query: explain vectorization only detail -select insert_num,part,c1,c2,c3,b from part_change_string_group_double -POSTHOOK: type: QUERY -Explain -PLAN VECTORIZATION: - enabled: true - enabledConditionsMet: [hive.vectorized.execution.enabled IS true] - -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-1 - Tez - Vertices: - Map 1 - Map Operator Tree: - TableScan Vectorization: - native: true - vectorizationSchemaColumns: [0:insert_num:int, 1:c1:double, 2:c2:double, 3:c3:double, 4:b:string, 5:part:int, 6:ROW__ID:struct] - Select Vectorization: - className: VectorSelectOperator - native: true - projectedOutputColumnNums: [0, 5, 1, 2, 3, 4] - File Sink Vectorization: - className: VectorFileSinkOperator - native: false - Execution mode: vectorized, llap - Map Vectorization: - enabled: true - enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true - inputFormatFeatureSupport: [] - featureSupportInUse: [] - inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat - allNative: false - usesVectorUDFAdaptor: false - vectorized: true - rowBatchContext: - dataColumnCount: 5 - includeColumns: [0, 1, 2, 3, 4] - dataColumns: insert_num:int, c1:double, c2:double, c3:double, b:string - partitionColumnCount: 1 - partitionColumns: part:int - scratchColumnTypeNames: [] - - Stage: Stage-0 - Fetch Operator - -PREHOOK: query: select insert_num,part,c1,c2,c3,b from part_change_string_group_double -PREHOOK: type: QUERY -PREHOOK: Input: default@part_change_string_group_double -PREHOOK: Input: default@part_change_string_group_double@part=1 -#### A masked pattern was here #### -POSTHOOK: query: select insert_num,part,c1,c2,c3,b from 
part_change_string_group_double -POSTHOOK: type: QUERY -POSTHOOK: Input: default@part_change_string_group_double -POSTHOOK: Input: default@part_change_string_group_double@part=1 -#### A masked pattern was here #### -insert_num part c1 c2 c3 b -101 1 1.7976931348623157E308 1.7976931348623157E308 1.7976931348623157E308 original -102 1 -1.7976931348623157E308 -1.7976931348623157E308 -1.7976931348623157E308 original -103 1 NULL NULL NULL original -104 1 30.774 30.774 30.774 original -105 1 46114.28 46114.28 46114.28 original -PREHOOK: query: drop table part_change_string_group_double -PREHOOK: type: DROPTABLE -PREHOOK: Input: default@part_change_string_group_double -PREHOOK: Output: default@part_change_string_group_double -POSTHOOK: query: drop table part_change_string_group_double -POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@part_change_string_group_double -POSTHOOK: Output: default@part_change_string_group_double -PREHOOK: query: CREATE TABLE part_change_date_group_string_group_date_timestamp(insert_num int, c1 DATE, c2 DATE, c3 DATE, c4 DATE, c5 DATE, c6 TIMESTAMP, c7 TIMESTAMP, c8 TIMESTAMP, c9 TIMESTAMP, c10 TIMESTAMP, b STRING) PARTITIONED BY(part INT) clustered by (c1) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true') -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@part_change_date_group_string_group_date_timestamp -POSTHOOK: query: CREATE TABLE part_change_date_group_string_group_date_timestamp(insert_num int, c1 DATE, c2 DATE, c3 DATE, c4 DATE, c5 DATE, c6 TIMESTAMP, c7 TIMESTAMP, c8 TIMESTAMP, c9 TIMESTAMP, c10 TIMESTAMP, b STRING) PARTITIONED BY(part INT) clustered by (c1) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true') -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@part_change_date_group_string_group_date_timestamp -PREHOOK: query: insert into table part_change_date_group_string_group_date_timestamp partition(part=1) SELECT insert_num, date1, date1, date1, date1, date1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, 'original' FROM schema_evolution_data -PREHOOK: type: QUERY -PREHOOK: Input: default@schema_evolution_data -PREHOOK: Output: default@part_change_date_group_string_group_date_timestamp@part=1 -POSTHOOK: query: insert into table part_change_date_group_string_group_date_timestamp partition(part=1) SELECT insert_num, date1, date1, date1, date1, date1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, 'original' FROM schema_evolution_data -POSTHOOK: type: QUERY -POSTHOOK: Input: default@schema_evolution_data -POSTHOOK: Output: default@part_change_date_group_string_group_date_timestamp@part=1 -POSTHOOK: Lineage: part_change_date_group_string_group_date_timestamp PARTITION(part=1).b SIMPLE [] -POSTHOOK: Lineage: part_change_date_group_string_group_date_timestamp PARTITION(part=1).c1 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:date1, type:date, comment:null), ] -POSTHOOK: Lineage: part_change_date_group_string_group_date_timestamp PARTITION(part=1).c10 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:timestamp1, type:timestamp, comment:null), ] -POSTHOOK: Lineage: part_change_date_group_string_group_date_timestamp PARTITION(part=1).c2 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:date1, type:date, comment:null), ] -POSTHOOK: Lineage: part_change_date_group_string_group_date_timestamp PARTITION(part=1).c3 SIMPLE 
[(schema_evolution_data)schema_evolution_data.FieldSchema(name:date1, type:date, comment:null), ] -POSTHOOK: Lineage: part_change_date_group_string_group_date_timestamp PARTITION(part=1).c4 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:date1, type:date, comment:null), ] -POSTHOOK: Lineage: part_change_date_group_string_group_date_timestamp PARTITION(part=1).c5 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:date1, type:date, comment:null), ] -POSTHOOK: Lineage: part_change_date_group_string_group_date_timestamp PARTITION(part=1).c6 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:timestamp1, type:timestamp, comment:null), ] -POSTHOOK: Lineage: part_change_date_group_string_group_date_timestamp PARTITION(part=1).c7 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:timestamp1, type:timestamp, comment:null), ] -POSTHOOK: Lineage: part_change_date_group_string_group_date_timestamp PARTITION(part=1).c8 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:timestamp1, type:timestamp, comment:null), ] -POSTHOOK: Lineage: part_change_date_group_string_group_date_timestamp PARTITION(part=1).c9 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:timestamp1, type:timestamp, comment:null), ] -POSTHOOK: Lineage: part_change_date_group_string_group_date_timestamp PARTITION(part=1).insert_num SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:insert_num, type:int, comment:null), ] -insert_num date1 date1 date1 date1 date1 timestamp1 timestamp1 timestamp1 timestamp1 timestamp1 _c11 -PREHOOK: query: alter table part_change_date_group_string_group_date_timestamp replace columns(insert_num int, c1 STRING, c2 CHAR(50), c3 CHAR(15), c4 VARCHAR(50), c5 VARCHAR(15), c6 STRING, c7 CHAR(50), c8 CHAR(15), c9 VARCHAR(50), c10 VARCHAR(15), b STRING) -PREHOOK: type: ALTERTABLE_REPLACECOLS -PREHOOK: Input: default@part_change_date_group_string_group_date_timestamp -PREHOOK: Output: default@part_change_date_group_string_group_date_timestamp -POSTHOOK: query: alter table part_change_date_group_string_group_date_timestamp replace columns(insert_num int, c1 STRING, c2 CHAR(50), c3 CHAR(15), c4 VARCHAR(50), c5 VARCHAR(15), c6 STRING, c7 CHAR(50), c8 CHAR(15), c9 VARCHAR(50), c10 VARCHAR(15), b STRING) -POSTHOOK: type: ALTERTABLE_REPLACECOLS -POSTHOOK: Input: default@part_change_date_group_string_group_date_timestamp -POSTHOOK: Output: default@part_change_date_group_string_group_date_timestamp -PREHOOK: query: insert into table part_change_date_group_string_group_date_timestamp partition(part=1) VALUES (111, 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'new') -PREHOOK: type: QUERY -PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@part_change_date_group_string_group_date_timestamp@part=1 -POSTHOOK: query: insert into table part_change_date_group_string_group_date_timestamp partition(part=1) VALUES (111, 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'new') -POSTHOOK: type: QUERY -POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@part_change_date_group_string_group_date_timestamp@part=1 -POSTHOOK: Lineage: part_change_date_group_string_group_date_timestamp PARTITION(part=1).b SCRIPT [] -POSTHOOK: Lineage: part_change_date_group_string_group_date_timestamp PARTITION(part=1).c1 SCRIPT [] -POSTHOOK: Lineage: 
part_change_date_group_string_group_date_timestamp PARTITION(part=1).c10 SCRIPT [] -POSTHOOK: Lineage: part_change_date_group_string_group_date_timestamp PARTITION(part=1).c2 SCRIPT [] -POSTHOOK: Lineage: part_change_date_group_string_group_date_timestamp PARTITION(part=1).c3 SCRIPT [] -POSTHOOK: Lineage: part_change_date_group_string_group_date_timestamp PARTITION(part=1).c4 SCRIPT [] -POSTHOOK: Lineage: part_change_date_group_string_group_date_timestamp PARTITION(part=1).c5 SCRIPT [] -POSTHOOK: Lineage: part_change_date_group_string_group_date_timestamp PARTITION(part=1).c6 SCRIPT [] -POSTHOOK: Lineage: part_change_date_group_string_group_date_timestamp PARTITION(part=1).c7 SCRIPT [] -POSTHOOK: Lineage: part_change_date_group_string_group_date_timestamp PARTITION(part=1).c8 SCRIPT [] -POSTHOOK: Lineage: part_change_date_group_string_group_date_timestamp PARTITION(part=1).c9 SCRIPT [] -POSTHOOK: Lineage: part_change_date_group_string_group_date_timestamp PARTITION(part=1).insert_num SCRIPT [] -_col0 _col1 _col2 _col3 _col4 _col5 _col6 _col7 _col8 _col9 _col10 _col11 -PREHOOK: query: explain vectorization only detail -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_date_group_string_group_date_timestamp -PREHOOK: type: QUERY -POSTHOOK: query: explain vectorization only detail -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_date_group_string_group_date_timestamp -POSTHOOK: type: QUERY -Explain -PLAN VECTORIZATION: - enabled: true - enabledConditionsMet: [hive.vectorized.execution.enabled IS true] - -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-1 - Tez - Vertices: - Map 1 - Map Operator Tree: - TableScan Vectorization: - native: true - vectorizationSchemaColumns: [0:insert_num:int, 1:c1:string, 2:c2:char(50), 3:c3:char(15), 4:c4:varchar(50), 5:c5:varchar(15), 6:c6:string, 7:c7:char(50), 8:c8:char(15), 9:c9:varchar(50), 10:c10:varchar(15), 11:b:string, 12:part:int, 13:ROW__ID:struct] - Select Vectorization: - className: VectorSelectOperator - native: true - projectedOutputColumnNums: [0, 12, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11] - File Sink Vectorization: - className: VectorFileSinkOperator - native: false - Execution mode: vectorized, llap - Map Vectorization: - enabled: true - enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true - inputFormatFeatureSupport: [] - featureSupportInUse: [] - inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat - allNative: false - usesVectorUDFAdaptor: false - vectorized: true - rowBatchContext: - dataColumnCount: 12 - includeColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11] - dataColumns: insert_num:int, c1:string, c2:char(50), c3:char(15), c4:varchar(50), c5:varchar(15), c6:string, c7:char(50), c8:char(15), c9:varchar(50), c10:varchar(15), b:string - partitionColumnCount: 1 - partitionColumns: part:int - scratchColumnTypeNames: [] - - Stage: Stage-0 - Fetch Operator - -PREHOOK: query: select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_date_group_string_group_date_timestamp -PREHOOK: type: QUERY -PREHOOK: Input: default@part_change_date_group_string_group_date_timestamp -PREHOOK: Input: default@part_change_date_group_string_group_date_timestamp@part=1 -#### A masked pattern was here #### -POSTHOOK: query: select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_date_group_string_group_date_timestamp -POSTHOOK: type: QUERY -POSTHOOK: Input: 
default@part_change_date_group_string_group_date_timestamp -POSTHOOK: Input: default@part_change_date_group_string_group_date_timestamp@part=1 -#### A masked pattern was here #### -insert_num part c1 c2 c3 c4 c5 c6 c7 c8 c9 c10 b -101 1 1950-12-18 1950-12-18 1950-12-18 1950-12-18 1950-12-18 6229-06-28 02:54:28.970117179 6229-06-28 02:54:28.970117179 6229-06-28 02:5 6229-06-28 02:54:28.970117179 6229-06-28 02:5 original -102 1 2049-12-18 2049-12-18 2049-12-18 2049-12-18 2049-12-18 5966-07-09 03:30:50.597 5966-07-09 03:30:50.597 5966-07-09 03:3 5966-07-09 03:30:50.597 5966-07-09 03:3 original -103 1 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL original -104 1 2021-09-24 2021-09-24 2021-09-24 2021-09-24 2021-09-24 1978-08-02 06:34:14.0 1978-08-02 06:34:14.0 1978-08-02 06:3 1978-08-02 06:34:14.0 1978-08-02 06:3 original -105 1 2024-11-11 2024-11-11 2024-11-11 2024-11-11 2024-11-11 1991-01-06 16:20:39.72036854 1991-01-06 16:20:39.72036854 1991-01-06 16:2 1991-01-06 16:20:39.72036854 1991-01-06 16:2 original -111 1 filler filler filler filler filler filler filler filler filler filler new -PREHOOK: query: drop table part_change_date_group_string_group_date_timestamp -PREHOOK: type: DROPTABLE -PREHOOK: Input: default@part_change_date_group_string_group_date_timestamp -PREHOOK: Output: default@part_change_date_group_string_group_date_timestamp -POSTHOOK: query: drop table part_change_date_group_string_group_date_timestamp -POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@part_change_date_group_string_group_date_timestamp -POSTHOOK: Output: default@part_change_date_group_string_group_date_timestamp -PREHOOK: query: CREATE TABLE part_change_numeric_group_string_group_multi_ints_string_group(insert_num int, - c1 tinyint, c2 smallint, c3 int, c4 bigint, - c5 tinyint, c6 smallint, c7 int, c8 bigint, c9 tinyint, c10 smallint, c11 int, c12 bigint, - c13 tinyint, c14 smallint, c15 int, c16 bigint, c17 tinyint, c18 smallint, c19 int, c20 bigint, - b STRING) PARTITIONED BY(part INT) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@part_change_numeric_group_string_group_multi_ints_string_group -POSTHOOK: query: CREATE TABLE part_change_numeric_group_string_group_multi_ints_string_group(insert_num int, - c1 tinyint, c2 smallint, c3 int, c4 bigint, - c5 tinyint, c6 smallint, c7 int, c8 bigint, c9 tinyint, c10 smallint, c11 int, c12 bigint, - c13 tinyint, c14 smallint, c15 int, c16 bigint, c17 tinyint, c18 smallint, c19 int, c20 bigint, - b STRING) PARTITIONED BY(part INT) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@part_change_numeric_group_string_group_multi_ints_string_group -PREHOOK: query: insert into table part_change_numeric_group_string_group_multi_ints_string_group partition(part=1) SELECT insert_num, - tinyint1, smallint1, int1, bigint1, - tinyint1, smallint1, int1, bigint1, tinyint1, smallint1, int1, bigint1, - tinyint1, smallint1, int1, bigint1, tinyint1, smallint1, int1, bigint1, - 'original' FROM schema_evolution_data -PREHOOK: type: QUERY -PREHOOK: Input: default@schema_evolution_data -PREHOOK: Output: default@part_change_numeric_group_string_group_multi_ints_string_group@part=1 -POSTHOOK: query: insert into table part_change_numeric_group_string_group_multi_ints_string_group partition(part=1) SELECT insert_num, - tinyint1, smallint1, int1, bigint1, - tinyint1, smallint1, int1, bigint1, tinyint1, smallint1, int1, bigint1, - tinyint1, smallint1, int1, bigint1, tinyint1, smallint1, int1, bigint1, - 
'original' FROM schema_evolution_data -POSTHOOK: type: QUERY -POSTHOOK: Input: default@schema_evolution_data -POSTHOOK: Output: default@part_change_numeric_group_string_group_multi_ints_string_group@part=1 -POSTHOOK: Lineage: part_change_numeric_group_string_group_multi_ints_string_group PARTITION(part=1).b SIMPLE [] -POSTHOOK: Lineage: part_change_numeric_group_string_group_multi_ints_string_group PARTITION(part=1).c1 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:tinyint1, type:tinyint, comment:null), ] -POSTHOOK: Lineage: part_change_numeric_group_string_group_multi_ints_string_group PARTITION(part=1).c10 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:smallint1, type:smallint, comment:null), ] -POSTHOOK: Lineage: part_change_numeric_group_string_group_multi_ints_string_group PARTITION(part=1).c11 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:int1, type:int, comment:null), ] -POSTHOOK: Lineage: part_change_numeric_group_string_group_multi_ints_string_group PARTITION(part=1).c12 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:bigint1, type:bigint, comment:null), ] -POSTHOOK: Lineage: part_change_numeric_group_string_group_multi_ints_string_group PARTITION(part=1).c13 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:tinyint1, type:tinyint, comment:null), ] -POSTHOOK: Lineage: part_change_numeric_group_string_group_multi_ints_string_group PARTITION(part=1).c14 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:smallint1, type:smallint, comment:null), ] -POSTHOOK: Lineage: part_change_numeric_group_string_group_multi_ints_string_group PARTITION(part=1).c15 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:int1, type:int, comment:null), ] -POSTHOOK: Lineage: part_change_numeric_group_string_group_multi_ints_string_group PARTITION(part=1).c16 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:bigint1, type:bigint, comment:null), ] -POSTHOOK: Lineage: part_change_numeric_group_string_group_multi_ints_string_group PARTITION(part=1).c17 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:tinyint1, type:tinyint, comment:null), ] -POSTHOOK: Lineage: part_change_numeric_group_string_group_multi_ints_string_group PARTITION(part=1).c18 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:smallint1, type:smallint, comment:null), ] -POSTHOOK: Lineage: part_change_numeric_group_string_group_multi_ints_string_group PARTITION(part=1).c19 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:int1, type:int, comment:null), ] -POSTHOOK: Lineage: part_change_numeric_group_string_group_multi_ints_string_group PARTITION(part=1).c2 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:smallint1, type:smallint, comment:null), ] -POSTHOOK: Lineage: part_change_numeric_group_string_group_multi_ints_string_group PARTITION(part=1).c20 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:bigint1, type:bigint, comment:null), ] -POSTHOOK: Lineage: part_change_numeric_group_string_group_multi_ints_string_group PARTITION(part=1).c3 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:int1, type:int, comment:null), ] -POSTHOOK: Lineage: part_change_numeric_group_string_group_multi_ints_string_group PARTITION(part=1).c4 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:bigint1, type:bigint, comment:null), ] -POSTHOOK: Lineage: 
part_change_numeric_group_string_group_multi_ints_string_group PARTITION(part=1).c5 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:tinyint1, type:tinyint, comment:null), ] -POSTHOOK: Lineage: part_change_numeric_group_string_group_multi_ints_string_group PARTITION(part=1).c6 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:smallint1, type:smallint, comment:null), ] -POSTHOOK: Lineage: part_change_numeric_group_string_group_multi_ints_string_group PARTITION(part=1).c7 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:int1, type:int, comment:null), ] -POSTHOOK: Lineage: part_change_numeric_group_string_group_multi_ints_string_group PARTITION(part=1).c8 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:bigint1, type:bigint, comment:null), ] -POSTHOOK: Lineage: part_change_numeric_group_string_group_multi_ints_string_group PARTITION(part=1).c9 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:tinyint1, type:tinyint, comment:null), ] -POSTHOOK: Lineage: part_change_numeric_group_string_group_multi_ints_string_group PARTITION(part=1).insert_num SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:insert_num, type:int, comment:null), ] -insert_num tinyint1 smallint1 int1 bigint1 tinyint1 smallint1 int1 bigint1 tinyint1 smallint1 int1 bigint1 tinyint1 smallint1 int1 bigint1 tinyint1 smallint1 int1 bigint1 _c21 -PREHOOK: query: explain vectorization only detail -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from part_change_numeric_group_string_group_multi_ints_string_group -PREHOOK: type: QUERY -POSTHOOK: query: explain vectorization only detail -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from part_change_numeric_group_string_group_multi_ints_string_group -POSTHOOK: type: QUERY -Explain -PLAN VECTORIZATION: - enabled: true - enabledConditionsMet: [hive.vectorized.execution.enabled IS true] - -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-1 - Tez - Vertices: - Map 1 - Map Operator Tree: - TableScan Vectorization: - native: true - vectorizationSchemaColumns: [0:insert_num:int, 1:c1:tinyint, 2:c2:smallint, 3:c3:int, 4:c4:bigint, 5:c5:tinyint, 6:c6:smallint, 7:c7:int, 8:c8:bigint, 9:c9:tinyint, 10:c10:smallint, 11:c11:int, 12:c12:bigint, 13:c13:tinyint, 14:c14:smallint, 15:c15:int, 16:c16:bigint, 17:c17:tinyint, 18:c18:smallint, 19:c19:int, 20:c20:bigint, 21:b:string, 22:part:int, 23:ROW__ID:struct] - Select Vectorization: - className: VectorSelectOperator - native: true - projectedOutputColumnNums: [0, 22, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21] - File Sink Vectorization: - className: VectorFileSinkOperator - native: false - Execution mode: vectorized, llap - Map Vectorization: - enabled: true - enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true - inputFormatFeatureSupport: [] - featureSupportInUse: [] - inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat - allNative: false - usesVectorUDFAdaptor: false - vectorized: true - rowBatchContext: - dataColumnCount: 22 - includeColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21] - dataColumns: insert_num:int, c1:tinyint, c2:smallint, c3:int, c4:bigint, c5:tinyint, c6:smallint, c7:int, c8:bigint, c9:tinyint, c10:smallint, c11:int, c12:bigint, c13:tinyint, 
c14:smallint, c15:int, c16:bigint, c17:tinyint, c18:smallint, c19:int, c20:bigint, b:string - partitionColumnCount: 1 - partitionColumns: part:int - scratchColumnTypeNames: [] - - Stage: Stage-0 - Fetch Operator - -PREHOOK: query: select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from part_change_numeric_group_string_group_multi_ints_string_group -PREHOOK: type: QUERY -PREHOOK: Input: default@part_change_numeric_group_string_group_multi_ints_string_group -PREHOOK: Input: default@part_change_numeric_group_string_group_multi_ints_string_group@part=1 -#### A masked pattern was here #### -POSTHOOK: query: select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from part_change_numeric_group_string_group_multi_ints_string_group -POSTHOOK: type: QUERY -POSTHOOK: Input: default@part_change_numeric_group_string_group_multi_ints_string_group -POSTHOOK: Input: default@part_change_numeric_group_string_group_multi_ints_string_group@part=1 -#### A masked pattern was here #### -insert_num part c1 c2 c3 c4 c5 c6 c7 c8 c9 c10 c11 c12 c13 c14 c15 c16 c17 c18 c19 c20 b -101 1 -128 NULL -2147483648 NULL -128 NULL -2147483648 NULL -128 NULL -2147483648 NULL -128 NULL -2147483648 NULL -128 NULL -2147483648 NULL original -102 1 127 32767 2147483647 9223372036854775807 127 32767 2147483647 9223372036854775807 127 32767 2147483647 9223372036854775807 127 32767 2147483647 9223372036854775807 127 32767 2147483647 9223372036854775807 original -103 1 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL original -104 1 23 834 203332 888888857923222 23 834 203332 888888857923222 23 834 203332 888888857923222 23 834 203332 888888857923222 23 834 203332 888888857923222 original -105 1 -99 -28300 -999992 -222282153733 -99 -28300 -999992 -222282153733 -99 -28300 -999992 -222282153733 -99 -28300 -999992 -222282153733 -99 -28300 -999992 -222282153733 original -PREHOOK: query: alter table part_change_numeric_group_string_group_multi_ints_string_group replace columns (insert_num int, - c1 STRING, c2 STRING, c3 STRING, c4 STRING, - c5 CHAR(50), c6 CHAR(50), c7 CHAR(50), c8 CHAR(50), c9 CHAR(5), c10 CHAR(5), c11 CHAR(5), c12 CHAR(5), - c13 VARCHAR(50), c14 VARCHAR(50), c15 VARCHAR(50), c16 VARCHAR(50), c17 VARCHAR(5), c18 VARCHAR(5), c19 VARCHAR(5), c20 VARCHAR(5), - b STRING) -PREHOOK: type: ALTERTABLE_REPLACECOLS -PREHOOK: Input: default@part_change_numeric_group_string_group_multi_ints_string_group -PREHOOK: Output: default@part_change_numeric_group_string_group_multi_ints_string_group -POSTHOOK: query: alter table part_change_numeric_group_string_group_multi_ints_string_group replace columns (insert_num int, - c1 STRING, c2 STRING, c3 STRING, c4 STRING, - c5 CHAR(50), c6 CHAR(50), c7 CHAR(50), c8 CHAR(50), c9 CHAR(5), c10 CHAR(5), c11 CHAR(5), c12 CHAR(5), - c13 VARCHAR(50), c14 VARCHAR(50), c15 VARCHAR(50), c16 VARCHAR(50), c17 VARCHAR(5), c18 VARCHAR(5), c19 VARCHAR(5), c20 VARCHAR(5), - b STRING) -POSTHOOK: type: ALTERTABLE_REPLACECOLS -POSTHOOK: Input: default@part_change_numeric_group_string_group_multi_ints_string_group -POSTHOOK: Output: default@part_change_numeric_group_string_group_multi_ints_string_group -PREHOOK: query: insert into table part_change_numeric_group_string_group_multi_ints_string_group partition(part=1) VALUES (111, - 'filler', 'filler', 'filler', 'filler', - 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', - 'filler', 'filler', 'filler', 
'filler', 'filler', 'filler', 'filler', 'filler', - 'new') -PREHOOK: type: QUERY -PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@part_change_numeric_group_string_group_multi_ints_string_group@part=1 -POSTHOOK: query: insert into table part_change_numeric_group_string_group_multi_ints_string_group partition(part=1) VALUES (111, - 'filler', 'filler', 'filler', 'filler', - 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', - 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', - 'new') -POSTHOOK: type: QUERY -POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@part_change_numeric_group_string_group_multi_ints_string_group@part=1 -POSTHOOK: Lineage: part_change_numeric_group_string_group_multi_ints_string_group PARTITION(part=1).b SCRIPT [] -POSTHOOK: Lineage: part_change_numeric_group_string_group_multi_ints_string_group PARTITION(part=1).c1 SCRIPT [] -POSTHOOK: Lineage: part_change_numeric_group_string_group_multi_ints_string_group PARTITION(part=1).c10 SCRIPT [] -POSTHOOK: Lineage: part_change_numeric_group_string_group_multi_ints_string_group PARTITION(part=1).c11 SCRIPT [] -POSTHOOK: Lineage: part_change_numeric_group_string_group_multi_ints_string_group PARTITION(part=1).c12 SCRIPT [] -POSTHOOK: Lineage: part_change_numeric_group_string_group_multi_ints_string_group PARTITION(part=1).c13 SCRIPT [] -POSTHOOK: Lineage: part_change_numeric_group_string_group_multi_ints_string_group PARTITION(part=1).c14 SCRIPT [] -POSTHOOK: Lineage: part_change_numeric_group_string_group_multi_ints_string_group PARTITION(part=1).c15 SCRIPT [] -POSTHOOK: Lineage: part_change_numeric_group_string_group_multi_ints_string_group PARTITION(part=1).c16 SCRIPT [] -POSTHOOK: Lineage: part_change_numeric_group_string_group_multi_ints_string_group PARTITION(part=1).c17 SCRIPT [] -POSTHOOK: Lineage: part_change_numeric_group_string_group_multi_ints_string_group PARTITION(part=1).c18 SCRIPT [] -POSTHOOK: Lineage: part_change_numeric_group_string_group_multi_ints_string_group PARTITION(part=1).c19 SCRIPT [] -POSTHOOK: Lineage: part_change_numeric_group_string_group_multi_ints_string_group PARTITION(part=1).c2 SCRIPT [] -POSTHOOK: Lineage: part_change_numeric_group_string_group_multi_ints_string_group PARTITION(part=1).c20 SCRIPT [] -POSTHOOK: Lineage: part_change_numeric_group_string_group_multi_ints_string_group PARTITION(part=1).c3 SCRIPT [] -POSTHOOK: Lineage: part_change_numeric_group_string_group_multi_ints_string_group PARTITION(part=1).c4 SCRIPT [] -POSTHOOK: Lineage: part_change_numeric_group_string_group_multi_ints_string_group PARTITION(part=1).c5 SCRIPT [] -POSTHOOK: Lineage: part_change_numeric_group_string_group_multi_ints_string_group PARTITION(part=1).c6 SCRIPT [] -POSTHOOK: Lineage: part_change_numeric_group_string_group_multi_ints_string_group PARTITION(part=1).c7 SCRIPT [] -POSTHOOK: Lineage: part_change_numeric_group_string_group_multi_ints_string_group PARTITION(part=1).c8 SCRIPT [] -POSTHOOK: Lineage: part_change_numeric_group_string_group_multi_ints_string_group PARTITION(part=1).c9 SCRIPT [] -POSTHOOK: Lineage: part_change_numeric_group_string_group_multi_ints_string_group PARTITION(part=1).insert_num SCRIPT [] -_col0 _col1 _col2 _col3 _col4 _col5 _col6 _col7 _col8 _col9 _col10 _col11 _col12 _col13 _col14 _col15 _col16 _col17 _col18 _col19 _col20 _col21 -PREHOOK: query: explain vectorization only detail -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b 
from part_change_numeric_group_string_group_multi_ints_string_group -PREHOOK: type: QUERY -POSTHOOK: query: explain vectorization only detail -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from part_change_numeric_group_string_group_multi_ints_string_group -POSTHOOK: type: QUERY -Explain -PLAN VECTORIZATION: - enabled: true - enabledConditionsMet: [hive.vectorized.execution.enabled IS true] - -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-1 - Tez - Vertices: - Map 1 - Map Operator Tree: - TableScan Vectorization: - native: true - vectorizationSchemaColumns: [0:insert_num:int, 1:c1:string, 2:c2:string, 3:c3:string, 4:c4:string, 5:c5:char(50), 6:c6:char(50), 7:c7:char(50), 8:c8:char(50), 9:c9:char(5), 10:c10:char(5), 11:c11:char(5), 12:c12:char(5), 13:c13:varchar(50), 14:c14:varchar(50), 15:c15:varchar(50), 16:c16:varchar(50), 17:c17:varchar(5), 18:c18:varchar(5), 19:c19:varchar(5), 20:c20:varchar(5), 21:b:string, 22:part:int, 23:ROW__ID:struct] - Select Vectorization: - className: VectorSelectOperator - native: true - projectedOutputColumnNums: [0, 22, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21] - File Sink Vectorization: - className: VectorFileSinkOperator - native: false - Execution mode: vectorized, llap - Map Vectorization: - enabled: true - enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true - inputFormatFeatureSupport: [] - featureSupportInUse: [] - inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat - allNative: false - usesVectorUDFAdaptor: false - vectorized: true - rowBatchContext: - dataColumnCount: 22 - includeColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21] - dataColumns: insert_num:int, c1:string, c2:string, c3:string, c4:string, c5:char(50), c6:char(50), c7:char(50), c8:char(50), c9:char(5), c10:char(5), c11:char(5), c12:char(5), c13:varchar(50), c14:varchar(50), c15:varchar(50), c16:varchar(50), c17:varchar(5), c18:varchar(5), c19:varchar(5), c20:varchar(5), b:string - partitionColumnCount: 1 - partitionColumns: part:int - scratchColumnTypeNames: [] - - Stage: Stage-0 - Fetch Operator - -PREHOOK: query: select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from part_change_numeric_group_string_group_multi_ints_string_group -PREHOOK: type: QUERY -PREHOOK: Input: default@part_change_numeric_group_string_group_multi_ints_string_group -PREHOOK: Input: default@part_change_numeric_group_string_group_multi_ints_string_group@part=1 -#### A masked pattern was here #### -POSTHOOK: query: select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from part_change_numeric_group_string_group_multi_ints_string_group -POSTHOOK: type: QUERY -POSTHOOK: Input: default@part_change_numeric_group_string_group_multi_ints_string_group -POSTHOOK: Input: default@part_change_numeric_group_string_group_multi_ints_string_group@part=1 -#### A masked pattern was here #### -insert_num part c1 c2 c3 c4 c5 c6 c7 c8 c9 c10 c11 c12 c13 c14 c15 c16 c17 c18 c19 c20 b -101 1 -128 NULL -2147483648 NULL -128 NULL -2147483648 NULL -128 NULL -2147 NULL -128 NULL -2147483648 NULL -128 NULL -2147 NULL original -102 1 127 32767 2147483647 9223372036854775807 127 32767 2147483647 9223372036854775807 127 32767 21474 92233 127 32767 2147483647 9223372036854775807 127 32767 21474 92233 original -103 1 NULL NULL NULL NULL NULL 
NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL original -104 1 23 834 203332 888888857923222 23 834 203332 888888857923222 23 834 20333 88888 23 834 203332 888888857923222 23 834 20333 88888 original -105 1 -99 -28300 -999992 -222282153733 -99 -28300 -999992 -222282153733 -99 -2830 -9999 -2222 -99 -28300 -999992 -222282153733 -99 -2830 -9999 -2222 original -111 1 filler filler filler filler filler filler filler filler fille fille fille fille filler filler filler filler fille fille fille fille new -PREHOOK: query: drop table part_change_numeric_group_string_group_multi_ints_string_group -PREHOOK: type: DROPTABLE -PREHOOK: Input: default@part_change_numeric_group_string_group_multi_ints_string_group -PREHOOK: Output: default@part_change_numeric_group_string_group_multi_ints_string_group -POSTHOOK: query: drop table part_change_numeric_group_string_group_multi_ints_string_group -POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@part_change_numeric_group_string_group_multi_ints_string_group -POSTHOOK: Output: default@part_change_numeric_group_string_group_multi_ints_string_group -PREHOOK: query: CREATE TABLE part_change_numeric_group_string_group_floating_string_group(insert_num int, - c1 decimal(38,18), c2 float, c3 double, - c4 decimal(38,18), c5 float, c6 double, c7 decimal(38,18), c8 float, c9 double, - c10 decimal(38,18), c11 float, c12 double, c13 decimal(38,18), c14 float, c15 double, - b STRING) PARTITIONED BY(part INT) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@part_change_numeric_group_string_group_floating_string_group -POSTHOOK: query: CREATE TABLE part_change_numeric_group_string_group_floating_string_group(insert_num int, - c1 decimal(38,18), c2 float, c3 double, - c4 decimal(38,18), c5 float, c6 double, c7 decimal(38,18), c8 float, c9 double, - c10 decimal(38,18), c11 float, c12 double, c13 decimal(38,18), c14 float, c15 double, - b STRING) PARTITIONED BY(part INT) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@part_change_numeric_group_string_group_floating_string_group -PREHOOK: query: insert into table part_change_numeric_group_string_group_floating_string_group partition(part=1) SELECT insert_num, - decimal1, float1, double1, - decimal1, float1, double1, decimal1, float1, double1, - decimal1, float1, double1, decimal1, float1, double1, - 'original' FROM schema_evolution_data -PREHOOK: type: QUERY -PREHOOK: Input: default@schema_evolution_data -PREHOOK: Output: default@part_change_numeric_group_string_group_floating_string_group@part=1 -POSTHOOK: query: insert into table part_change_numeric_group_string_group_floating_string_group partition(part=1) SELECT insert_num, - decimal1, float1, double1, - decimal1, float1, double1, decimal1, float1, double1, - decimal1, float1, double1, decimal1, float1, double1, - 'original' FROM schema_evolution_data -POSTHOOK: type: QUERY -POSTHOOK: Input: default@schema_evolution_data -POSTHOOK: Output: default@part_change_numeric_group_string_group_floating_string_group@part=1 -POSTHOOK: Lineage: part_change_numeric_group_string_group_floating_string_group PARTITION(part=1).b SIMPLE [] -POSTHOOK: Lineage: part_change_numeric_group_string_group_floating_string_group PARTITION(part=1).c1 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:decimal1, type:decimal(38,18), comment:null), ] -POSTHOOK: Lineage: part_change_numeric_group_string_group_floating_string_group PARTITION(part=1).c10 SIMPLE 
[(schema_evolution_data)schema_evolution_data.FieldSchema(name:decimal1, type:decimal(38,18), comment:null), ] -POSTHOOK: Lineage: part_change_numeric_group_string_group_floating_string_group PARTITION(part=1).c11 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:float1, type:float, comment:null), ] -POSTHOOK: Lineage: part_change_numeric_group_string_group_floating_string_group PARTITION(part=1).c12 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:double1, type:double, comment:null), ] -POSTHOOK: Lineage: part_change_numeric_group_string_group_floating_string_group PARTITION(part=1).c13 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:decimal1, type:decimal(38,18), comment:null), ] -POSTHOOK: Lineage: part_change_numeric_group_string_group_floating_string_group PARTITION(part=1).c14 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:float1, type:float, comment:null), ] -POSTHOOK: Lineage: part_change_numeric_group_string_group_floating_string_group PARTITION(part=1).c15 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:double1, type:double, comment:null), ] -POSTHOOK: Lineage: part_change_numeric_group_string_group_floating_string_group PARTITION(part=1).c2 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:float1, type:float, comment:null), ] -POSTHOOK: Lineage: part_change_numeric_group_string_group_floating_string_group PARTITION(part=1).c3 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:double1, type:double, comment:null), ] -POSTHOOK: Lineage: part_change_numeric_group_string_group_floating_string_group PARTITION(part=1).c4 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:decimal1, type:decimal(38,18), comment:null), ] -POSTHOOK: Lineage: part_change_numeric_group_string_group_floating_string_group PARTITION(part=1).c5 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:float1, type:float, comment:null), ] -POSTHOOK: Lineage: part_change_numeric_group_string_group_floating_string_group PARTITION(part=1).c6 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:double1, type:double, comment:null), ] -POSTHOOK: Lineage: part_change_numeric_group_string_group_floating_string_group PARTITION(part=1).c7 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:decimal1, type:decimal(38,18), comment:null), ] -POSTHOOK: Lineage: part_change_numeric_group_string_group_floating_string_group PARTITION(part=1).c8 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:float1, type:float, comment:null), ] -POSTHOOK: Lineage: part_change_numeric_group_string_group_floating_string_group PARTITION(part=1).c9 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:double1, type:double, comment:null), ] -POSTHOOK: Lineage: part_change_numeric_group_string_group_floating_string_group PARTITION(part=1).insert_num SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:insert_num, type:int, comment:null), ] -insert_num decimal1 float1 double1 decimal1 float1 double1 decimal1 float1 double1 decimal1 float1 double1 decimal1 float1 double1 _c16 -PREHOOK: query: explain vectorization only detail -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from part_change_numeric_group_string_group_floating_string_group -PREHOOK: type: QUERY -POSTHOOK: query: explain vectorization only detail -select 
insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from part_change_numeric_group_string_group_floating_string_group -POSTHOOK: type: QUERY -Explain -PLAN VECTORIZATION: - enabled: true - enabledConditionsMet: [hive.vectorized.execution.enabled IS true] - -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-1 - Tez - Vertices: - Map 1 - Map Operator Tree: - TableScan Vectorization: - native: true - vectorizationSchemaColumns: [0:insert_num:int, 1:c1:decimal(38,18), 2:c2:float, 3:c3:double, 4:c4:decimal(38,18), 5:c5:float, 6:c6:double, 7:c7:decimal(38,18), 8:c8:float, 9:c9:double, 10:c10:decimal(38,18), 11:c11:float, 12:c12:double, 13:c13:decimal(38,18), 14:c14:float, 15:c15:double, 16:b:string, 17:part:int, 18:ROW__ID:struct] - Select Vectorization: - className: VectorSelectOperator - native: true - projectedOutputColumnNums: [0, 17, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16] - File Sink Vectorization: - className: VectorFileSinkOperator - native: false - Execution mode: vectorized, llap - Map Vectorization: - enabled: true - enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true - inputFormatFeatureSupport: [] - featureSupportInUse: [] - inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat - allNative: false - usesVectorUDFAdaptor: false - vectorized: true - rowBatchContext: - dataColumnCount: 17 - includeColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16] - dataColumns: insert_num:int, c1:decimal(38,18), c2:float, c3:double, c4:decimal(38,18), c5:float, c6:double, c7:decimal(38,18), c8:float, c9:double, c10:decimal(38,18), c11:float, c12:double, c13:decimal(38,18), c14:float, c15:double, b:string - partitionColumnCount: 1 - partitionColumns: part:int - scratchColumnTypeNames: [] - - Stage: Stage-0 - Fetch Operator - -PREHOOK: query: select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from part_change_numeric_group_string_group_floating_string_group -PREHOOK: type: QUERY -PREHOOK: Input: default@part_change_numeric_group_string_group_floating_string_group -PREHOOK: Input: default@part_change_numeric_group_string_group_floating_string_group@part=1 -#### A masked pattern was here #### -POSTHOOK: query: select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from part_change_numeric_group_string_group_floating_string_group -POSTHOOK: type: QUERY -POSTHOOK: Input: default@part_change_numeric_group_string_group_floating_string_group -POSTHOOK: Input: default@part_change_numeric_group_string_group_floating_string_group@part=1 -#### A masked pattern was here #### -insert_num part c1 c2 c3 c4 c5 c6 c7 c8 c9 c10 c11 c12 c13 c14 c15 b -101 1 99999999999999999999.999999999999999999 Infinity 1.7976931348623157E308 99999999999999999999.999999999999999999 Infinity 1.7976931348623157E308 99999999999999999999.999999999999999999 Infinity 1.7976931348623157E308 99999999999999999999.999999999999999999 Infinity 1.7976931348623157E308 99999999999999999999.999999999999999999 Infinity 1.7976931348623157E308 original -102 1 -99999999999999999999.999999999999999999 -Infinity -1.7976931348623157E308 -99999999999999999999.999999999999999999 -Infinity -1.7976931348623157E308 -99999999999999999999.999999999999999999 -Infinity -1.7976931348623157E308 -99999999999999999999.999999999999999999 -Infinity -1.7976931348623157E308 -99999999999999999999.999999999999999999 -Infinity -1.7976931348623157E308 original -103 1 NULL NULL NULL NULL NULL 
NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL original -104 1 66475.561431000000000000 -100.35978 30.774 66475.561431000000000000 -100.35978 30.774 66475.561431000000000000 -100.35978 30.774 66475.561431000000000000 -100.35978 30.774 66475.561431000000000000 -100.35978 30.774 original -105 1 9250340.750000000000000000 NULL 46114.28 9250340.750000000000000000 NULL 46114.28 9250340.750000000000000000 NULL 46114.28 9250340.750000000000000000 NULL 46114.28 9250340.750000000000000000 NULL 46114.28 original -PREHOOK: query: alter table part_change_numeric_group_string_group_floating_string_group replace columns (insert_num int, - c1 STRING, c2 STRING, c3 STRING, - c4 CHAR(50), c5 CHAR(50), c6 CHAR(50), c7 CHAR(7), c8 CHAR(7), c9 CHAR(7), - c10 VARCHAR(50), c11 VARCHAR(50), c12 VARCHAR(50), c13 VARCHAR(7), c14 VARCHAR(7), c15 VARCHAR(7), - b STRING) -PREHOOK: type: ALTERTABLE_REPLACECOLS -PREHOOK: Input: default@part_change_numeric_group_string_group_floating_string_group -PREHOOK: Output: default@part_change_numeric_group_string_group_floating_string_group -POSTHOOK: query: alter table part_change_numeric_group_string_group_floating_string_group replace columns (insert_num int, - c1 STRING, c2 STRING, c3 STRING, - c4 CHAR(50), c5 CHAR(50), c6 CHAR(50), c7 CHAR(7), c8 CHAR(7), c9 CHAR(7), - c10 VARCHAR(50), c11 VARCHAR(50), c12 VARCHAR(50), c13 VARCHAR(7), c14 VARCHAR(7), c15 VARCHAR(7), - b STRING) -POSTHOOK: type: ALTERTABLE_REPLACECOLS -POSTHOOK: Input: default@part_change_numeric_group_string_group_floating_string_group -POSTHOOK: Output: default@part_change_numeric_group_string_group_floating_string_group -PREHOOK: query: insert into table part_change_numeric_group_string_group_floating_string_group partition(part=1) VALUES (111, - 'filler', 'filler', 'filler', - 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', - 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', - 'new') -PREHOOK: type: QUERY -PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@part_change_numeric_group_string_group_floating_string_group@part=1 -POSTHOOK: query: insert into table part_change_numeric_group_string_group_floating_string_group partition(part=1) VALUES (111, - 'filler', 'filler', 'filler', - 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', - 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', - 'new') -POSTHOOK: type: QUERY -POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@part_change_numeric_group_string_group_floating_string_group@part=1 -POSTHOOK: Lineage: part_change_numeric_group_string_group_floating_string_group PARTITION(part=1).b SCRIPT [] -POSTHOOK: Lineage: part_change_numeric_group_string_group_floating_string_group PARTITION(part=1).c1 SCRIPT [] -POSTHOOK: Lineage: part_change_numeric_group_string_group_floating_string_group PARTITION(part=1).c10 SCRIPT [] -POSTHOOK: Lineage: part_change_numeric_group_string_group_floating_string_group PARTITION(part=1).c11 SCRIPT [] -POSTHOOK: Lineage: part_change_numeric_group_string_group_floating_string_group PARTITION(part=1).c12 SCRIPT [] -POSTHOOK: Lineage: part_change_numeric_group_string_group_floating_string_group PARTITION(part=1).c13 SCRIPT [] -POSTHOOK: Lineage: part_change_numeric_group_string_group_floating_string_group PARTITION(part=1).c14 SCRIPT [] -POSTHOOK: Lineage: part_change_numeric_group_string_group_floating_string_group PARTITION(part=1).c15 SCRIPT [] -POSTHOOK: Lineage: part_change_numeric_group_string_group_floating_string_group PARTITION(part=1).c2 
SCRIPT [] -POSTHOOK: Lineage: part_change_numeric_group_string_group_floating_string_group PARTITION(part=1).c3 SCRIPT [] -POSTHOOK: Lineage: part_change_numeric_group_string_group_floating_string_group PARTITION(part=1).c4 SCRIPT [] -POSTHOOK: Lineage: part_change_numeric_group_string_group_floating_string_group PARTITION(part=1).c5 SCRIPT [] -POSTHOOK: Lineage: part_change_numeric_group_string_group_floating_string_group PARTITION(part=1).c6 SCRIPT [] -POSTHOOK: Lineage: part_change_numeric_group_string_group_floating_string_group PARTITION(part=1).c7 SCRIPT [] -POSTHOOK: Lineage: part_change_numeric_group_string_group_floating_string_group PARTITION(part=1).c8 SCRIPT [] -POSTHOOK: Lineage: part_change_numeric_group_string_group_floating_string_group PARTITION(part=1).c9 SCRIPT [] -POSTHOOK: Lineage: part_change_numeric_group_string_group_floating_string_group PARTITION(part=1).insert_num SCRIPT [] -_col0 _col1 _col2 _col3 _col4 _col5 _col6 _col7 _col8 _col9 _col10 _col11 _col12 _col13 _col14 _col15 _col16 -PREHOOK: query: explain vectorization only detail -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from part_change_numeric_group_string_group_floating_string_group -PREHOOK: type: QUERY -POSTHOOK: query: explain vectorization only detail -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from part_change_numeric_group_string_group_floating_string_group -POSTHOOK: type: QUERY -Explain -PLAN VECTORIZATION: - enabled: true - enabledConditionsMet: [hive.vectorized.execution.enabled IS true] - -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-1 - Tez - Vertices: - Map 1 - Map Operator Tree: - TableScan Vectorization: - native: true - vectorizationSchemaColumns: [0:insert_num:int, 1:c1:string, 2:c2:string, 3:c3:string, 4:c4:char(50), 5:c5:char(50), 6:c6:char(50), 7:c7:char(7), 8:c8:char(7), 9:c9:char(7), 10:c10:varchar(50), 11:c11:varchar(50), 12:c12:varchar(50), 13:c13:varchar(7), 14:c14:varchar(7), 15:c15:varchar(7), 16:b:string, 17:part:int, 18:ROW__ID:struct] - Select Vectorization: - className: VectorSelectOperator - native: true - projectedOutputColumnNums: [0, 17, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16] - File Sink Vectorization: - className: VectorFileSinkOperator - native: false - Execution mode: vectorized, llap - Map Vectorization: - enabled: true - enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true - inputFormatFeatureSupport: [] - featureSupportInUse: [] - inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat - allNative: false - usesVectorUDFAdaptor: false - vectorized: true - rowBatchContext: - dataColumnCount: 17 - includeColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16] - dataColumns: insert_num:int, c1:string, c2:string, c3:string, c4:char(50), c5:char(50), c6:char(50), c7:char(7), c8:char(7), c9:char(7), c10:varchar(50), c11:varchar(50), c12:varchar(50), c13:varchar(7), c14:varchar(7), c15:varchar(7), b:string - partitionColumnCount: 1 - partitionColumns: part:int - scratchColumnTypeNames: [] - - Stage: Stage-0 - Fetch Operator - -PREHOOK: query: select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from part_change_numeric_group_string_group_floating_string_group -PREHOOK: type: QUERY -PREHOOK: Input: default@part_change_numeric_group_string_group_floating_string_group -PREHOOK: Input: default@part_change_numeric_group_string_group_floating_string_group@part=1 
-#### A masked pattern was here #### -POSTHOOK: query: select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from part_change_numeric_group_string_group_floating_string_group -POSTHOOK: type: QUERY -POSTHOOK: Input: default@part_change_numeric_group_string_group_floating_string_group -POSTHOOK: Input: default@part_change_numeric_group_string_group_floating_string_group@part=1 -#### A masked pattern was here #### -insert_num part c1 c2 c3 c4 c5 c6 c7 c8 c9 c10 c11 c12 c13 c14 c15 b -101 1 99999999999999999999.999999999999999999 Infinity 1.7976931348623157E308 99999999999999999999.999999999999999999 Infinity 1.7976931348623157E308 9999999 Infinit 1.79769 99999999999999999999.999999999999999999 Infinity 1.7976931348623157E308 9999999 Infinit 1.79769 original -102 1 -99999999999999999999.999999999999999999 -Infinity -1.7976931348623157E308 -99999999999999999999.999999999999999999 -Infinity -1.7976931348623157E308 -999999 -Infini -1.7976 -99999999999999999999.999999999999999999 -Infinity -1.7976931348623157E308 -999999 -Infini -1.7976 original -103 1 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL original -104 1 66475.561431 -100.35978 30.774 66475.561431 -100.35978 30.774 66475.5 -100.35 30.774 66475.561431 -100.35978 30.774 66475.5 -100.35 30.774 original -105 1 9250340.75 NULL 46114.28 9250340.75 NULL 46114.28 9250340 NULL 46114.2 9250340.75 NULL 46114.28 9250340 NULL 46114.2 original -111 1 filler filler filler filler filler filler filler filler filler filler filler filler filler filler filler new -PREHOOK: query: drop table part_change_numeric_group_string_group_floating_string_group -PREHOOK: type: DROPTABLE -PREHOOK: Input: default@part_change_numeric_group_string_group_floating_string_group -PREHOOK: Output: default@part_change_numeric_group_string_group_floating_string_group -POSTHOOK: query: drop table part_change_numeric_group_string_group_floating_string_group -POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@part_change_numeric_group_string_group_floating_string_group -POSTHOOK: Output: default@part_change_numeric_group_string_group_floating_string_group -PREHOOK: query: CREATE TABLE part_change_string_group_string_group_string(insert_num int, - c1 string, c2 string, c3 string, c4 string, - c5 CHAR(50), c6 CHAR(50), c7 CHAR(50), - c8 VARCHAR(50), c9 VARCHAR(50), c10 VARCHAR(50), b STRING) PARTITIONED BY(part INT) clustered by (c1) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true') -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@part_change_string_group_string_group_string -POSTHOOK: query: CREATE TABLE part_change_string_group_string_group_string(insert_num int, - c1 string, c2 string, c3 string, c4 string, - c5 CHAR(50), c6 CHAR(50), c7 CHAR(50), - c8 VARCHAR(50), c9 VARCHAR(50), c10 VARCHAR(50), b STRING) PARTITIONED BY(part INT) clustered by (c1) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true') -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@part_change_string_group_string_group_string -PREHOOK: query: insert into table part_change_string_group_string_group_string partition(part=1) SELECT insert_num, - string2, string2, string2, string2, - string2, string2, string2, - string2, string2, string2, - 'original' FROM schema_evolution_data -PREHOOK: type: QUERY -PREHOOK: Input: default@schema_evolution_data -PREHOOK: Output: default@part_change_string_group_string_group_string@part=1 -POSTHOOK: query: insert into 
table part_change_string_group_string_group_string partition(part=1) SELECT insert_num, - string2, string2, string2, string2, - string2, string2, string2, - string2, string2, string2, - 'original' FROM schema_evolution_data -POSTHOOK: type: QUERY -POSTHOOK: Input: default@schema_evolution_data -POSTHOOK: Output: default@part_change_string_group_string_group_string@part=1 -POSTHOOK: Lineage: part_change_string_group_string_group_string PARTITION(part=1).b SIMPLE [] -POSTHOOK: Lineage: part_change_string_group_string_group_string PARTITION(part=1).c1 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:string2, type:string, comment:null), ] -POSTHOOK: Lineage: part_change_string_group_string_group_string PARTITION(part=1).c10 EXPRESSION [(schema_evolution_data)schema_evolution_data.FieldSchema(name:string2, type:string, comment:null), ] -POSTHOOK: Lineage: part_change_string_group_string_group_string PARTITION(part=1).c2 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:string2, type:string, comment:null), ] -POSTHOOK: Lineage: part_change_string_group_string_group_string PARTITION(part=1).c3 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:string2, type:string, comment:null), ] -POSTHOOK: Lineage: part_change_string_group_string_group_string PARTITION(part=1).c4 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:string2, type:string, comment:null), ] -POSTHOOK: Lineage: part_change_string_group_string_group_string PARTITION(part=1).c5 EXPRESSION [(schema_evolution_data)schema_evolution_data.FieldSchema(name:string2, type:string, comment:null), ] -POSTHOOK: Lineage: part_change_string_group_string_group_string PARTITION(part=1).c6 EXPRESSION [(schema_evolution_data)schema_evolution_data.FieldSchema(name:string2, type:string, comment:null), ] -POSTHOOK: Lineage: part_change_string_group_string_group_string PARTITION(part=1).c7 EXPRESSION [(schema_evolution_data)schema_evolution_data.FieldSchema(name:string2, type:string, comment:null), ] -POSTHOOK: Lineage: part_change_string_group_string_group_string PARTITION(part=1).c8 EXPRESSION [(schema_evolution_data)schema_evolution_data.FieldSchema(name:string2, type:string, comment:null), ] -POSTHOOK: Lineage: part_change_string_group_string_group_string PARTITION(part=1).c9 EXPRESSION [(schema_evolution_data)schema_evolution_data.FieldSchema(name:string2, type:string, comment:null), ] -POSTHOOK: Lineage: part_change_string_group_string_group_string PARTITION(part=1).insert_num SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:insert_num, type:int, comment:null), ] -_col0 _col1 _col2 _col3 _col4 _col5 _col6 _col7 _col8 _col9 _col10 _col11 -PREHOOK: query: explain vectorization only detail -select insert_num,part,c1,c2,c3,c4,b from part_change_string_group_string_group_string -PREHOOK: type: QUERY -POSTHOOK: query: explain vectorization only detail -select insert_num,part,c1,c2,c3,c4,b from part_change_string_group_string_group_string -POSTHOOK: type: QUERY -Explain -PLAN VECTORIZATION: - enabled: true - enabledConditionsMet: [hive.vectorized.execution.enabled IS true] - -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-1 - Tez - Vertices: - Map 1 - Map Operator Tree: - TableScan Vectorization: - native: true - vectorizationSchemaColumns: [0:insert_num:int, 1:c1:string, 2:c2:string, 3:c3:string, 4:c4:string, 5:c5:char(50), 6:c6:char(50), 7:c7:char(50), 8:c8:varchar(50), 9:c9:varchar(50), 
10:c10:varchar(50), 11:b:string, 12:part:int, 13:ROW__ID:struct] - Select Vectorization: - className: VectorSelectOperator - native: true - projectedOutputColumnNums: [0, 12, 1, 2, 3, 4, 11] - File Sink Vectorization: - className: VectorFileSinkOperator - native: false - Execution mode: vectorized, llap - Map Vectorization: - enabled: true - enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true - inputFormatFeatureSupport: [] - featureSupportInUse: [] - inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat - allNative: false - usesVectorUDFAdaptor: false - vectorized: true - rowBatchContext: - dataColumnCount: 12 - includeColumns: [0, 1, 2, 3, 4, 11] - dataColumns: insert_num:int, c1:string, c2:string, c3:string, c4:string, c5:char(50), c6:char(50), c7:char(50), c8:varchar(50), c9:varchar(50), c10:varchar(50), b:string - partitionColumnCount: 1 - partitionColumns: part:int - scratchColumnTypeNames: [] - - Stage: Stage-0 - Fetch Operator - -PREHOOK: query: select insert_num,part,c1,c2,c3,c4,b from part_change_string_group_string_group_string -PREHOOK: type: QUERY -PREHOOK: Input: default@part_change_string_group_string_group_string -PREHOOK: Input: default@part_change_string_group_string_group_string@part=1 -#### A masked pattern was here #### -POSTHOOK: query: select insert_num,part,c1,c2,c3,c4,b from part_change_string_group_string_group_string -POSTHOOK: type: QUERY -POSTHOOK: Input: default@part_change_string_group_string_group_string -POSTHOOK: Input: default@part_change_string_group_string_group_string@part=1 -#### A masked pattern was here #### -insert_num part c1 c2 c3 c4 b -101 1 freckled freckled freckled freckled original -102 1 ox ox ox ox original -103 1 original -104 1 I cooked I cooked I cooked I cooked original -105 1 200 200 200 200 original -PREHOOK: query: alter table part_change_string_group_string_group_string replace columns (insert_num int, - c1 CHAR(50), c2 CHAR(9), c3 VARCHAR(50), c4 CHAR(9), - c5 VARCHAR(50), c6 VARCHAR(9), c7 STRING, - c8 CHAR(50), c9 CHAR(9), c10 STRING, b STRING) -PREHOOK: type: ALTERTABLE_REPLACECOLS -PREHOOK: Input: default@part_change_string_group_string_group_string -PREHOOK: Output: default@part_change_string_group_string_group_string -POSTHOOK: query: alter table part_change_string_group_string_group_string replace columns (insert_num int, - c1 CHAR(50), c2 CHAR(9), c3 VARCHAR(50), c4 CHAR(9), - c5 VARCHAR(50), c6 VARCHAR(9), c7 STRING, - c8 CHAR(50), c9 CHAR(9), c10 STRING, b STRING) -POSTHOOK: type: ALTERTABLE_REPLACECOLS -POSTHOOK: Input: default@part_change_string_group_string_group_string -POSTHOOK: Output: default@part_change_string_group_string_group_string -PREHOOK: query: insert into table part_change_string_group_string_group_string partition(part=1) VALUES (111, - 'filler', 'filler', 'filler', 'filler', - 'filler', 'filler', 'filler', - 'filler', 'filler', 'filler', - 'new') -PREHOOK: type: QUERY -PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@part_change_string_group_string_group_string@part=1 -POSTHOOK: query: insert into table part_change_string_group_string_group_string partition(part=1) VALUES (111, - 'filler', 'filler', 'filler', 'filler', - 'filler', 'filler', 'filler', - 'filler', 'filler', 'filler', - 'new') -POSTHOOK: type: QUERY -POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@part_change_string_group_string_group_string@part=1 -POSTHOOK: Lineage: part_change_string_group_string_group_string PARTITION(part=1).b SCRIPT [] -POSTHOOK: 
Lineage: part_change_string_group_string_group_string PARTITION(part=1).c1 SCRIPT [] -POSTHOOK: Lineage: part_change_string_group_string_group_string PARTITION(part=1).c10 SCRIPT [] -POSTHOOK: Lineage: part_change_string_group_string_group_string PARTITION(part=1).c2 SCRIPT [] -POSTHOOK: Lineage: part_change_string_group_string_group_string PARTITION(part=1).c3 SCRIPT [] -POSTHOOK: Lineage: part_change_string_group_string_group_string PARTITION(part=1).c4 SCRIPT [] -POSTHOOK: Lineage: part_change_string_group_string_group_string PARTITION(part=1).c5 SCRIPT [] -POSTHOOK: Lineage: part_change_string_group_string_group_string PARTITION(part=1).c6 SCRIPT [] -POSTHOOK: Lineage: part_change_string_group_string_group_string PARTITION(part=1).c7 SCRIPT [] -POSTHOOK: Lineage: part_change_string_group_string_group_string PARTITION(part=1).c8 SCRIPT [] -POSTHOOK: Lineage: part_change_string_group_string_group_string PARTITION(part=1).c9 SCRIPT [] -POSTHOOK: Lineage: part_change_string_group_string_group_string PARTITION(part=1).insert_num SCRIPT [] -_col0 _col1 _col2 _col3 _col4 _col5 _col6 _col7 _col8 _col9 _col10 _col11 -PREHOOK: query: explain vectorization only detail -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_string_group_string_group_string -PREHOOK: type: QUERY -POSTHOOK: query: explain vectorization only detail -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_string_group_string_group_string -POSTHOOK: type: QUERY -Explain -PLAN VECTORIZATION: - enabled: true - enabledConditionsMet: [hive.vectorized.execution.enabled IS true] - -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-1 - Tez - Vertices: - Map 1 - Map Operator Tree: - TableScan Vectorization: - native: true - vectorizationSchemaColumns: [0:insert_num:int, 1:c1:char(50), 2:c2:char(9), 3:c3:varchar(50), 4:c4:char(9), 5:c5:varchar(50), 6:c6:varchar(9), 7:c7:string, 8:c8:char(50), 9:c9:char(9), 10:c10:string, 11:b:string, 12:part:int, 13:ROW__ID:struct] - Select Vectorization: - className: VectorSelectOperator - native: true - projectedOutputColumnNums: [0, 12, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11] - File Sink Vectorization: - className: VectorFileSinkOperator - native: false - Execution mode: vectorized, llap - Map Vectorization: - enabled: true - enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true - inputFormatFeatureSupport: [] - featureSupportInUse: [] - inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat - allNative: false - usesVectorUDFAdaptor: false - vectorized: true - rowBatchContext: - dataColumnCount: 12 - includeColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11] - dataColumns: insert_num:int, c1:char(50), c2:char(9), c3:varchar(50), c4:char(9), c5:varchar(50), c6:varchar(9), c7:string, c8:char(50), c9:char(9), c10:string, b:string - partitionColumnCount: 1 - partitionColumns: part:int - scratchColumnTypeNames: [] - - Stage: Stage-0 - Fetch Operator - -PREHOOK: query: select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_string_group_string_group_string -PREHOOK: type: QUERY -PREHOOK: Input: default@part_change_string_group_string_group_string -PREHOOK: Input: default@part_change_string_group_string_group_string@part=1 -#### A masked pattern was here #### -POSTHOOK: query: select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_string_group_string_group_string -POSTHOOK: type: QUERY -POSTHOOK: Input: 
default@part_change_string_group_string_group_string -POSTHOOK: Input: default@part_change_string_group_string_group_string@part=1 -#### A masked pattern was here #### -insert_num part c1 c2 c3 c4 c5 c6 c7 c8 c9 c10 b -101 1 freckled freckled freckled freckled freckled freckled freckled freckled freckled freckled original -102 1 ox ox ox ox ox ox ox ox ox ox original -103 1 original -104 1 I cooked I cooked I cooked I cooked I cooked I cooked I cooked I cooked I cooked I cooked original -105 1 200 200 200 200 200 200 200 200 200 200 original -111 1 filler filler filler filler filler filler filler filler filler filler new -PREHOOK: query: drop table part_change_string_group_string_group_string -PREHOOK: type: DROPTABLE -PREHOOK: Input: default@part_change_string_group_string_group_string -PREHOOK: Output: default@part_change_string_group_string_group_string -POSTHOOK: query: drop table part_change_string_group_string_group_string -POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@part_change_string_group_string_group_string -POSTHOOK: Output: default@part_change_string_group_string_group_string -PREHOOK: query: CREATE TABLE part_change_lower_to_higher_numeric_group_tinyint_to_bigint(insert_num int, - c1 tinyint, c2 tinyint, c3 tinyint, c4 tinyint, c5 tinyint, c6 tinyint, - c7 smallint, c8 smallint, c9 smallint, c10 smallint, c11 smallint, - c12 int, c13 int, c14 int, c15 int, - c16 bigint, c17 bigint, c18 bigint, - b STRING) PARTITIONED BY(part INT) clustered by (c1) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true') -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@part_change_lower_to_higher_numeric_group_tinyint_to_bigint -POSTHOOK: query: CREATE TABLE part_change_lower_to_higher_numeric_group_tinyint_to_bigint(insert_num int, - c1 tinyint, c2 tinyint, c3 tinyint, c4 tinyint, c5 tinyint, c6 tinyint, - c7 smallint, c8 smallint, c9 smallint, c10 smallint, c11 smallint, - c12 int, c13 int, c14 int, c15 int, - c16 bigint, c17 bigint, c18 bigint, - b STRING) PARTITIONED BY(part INT) clustered by (c1) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true') -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@part_change_lower_to_higher_numeric_group_tinyint_to_bigint -PREHOOK: query: insert into table part_change_lower_to_higher_numeric_group_tinyint_to_bigint partition(part=1) SELECT insert_num, - tinyint1, tinyint1, tinyint1, tinyint1, tinyint1, tinyint1, - smallint1, smallint1, smallint1, smallint1, smallint1, - int1, int1, int1, int1, - bigint1, bigint1, bigint1, - 'original' FROM schema_evolution_data -PREHOOK: type: QUERY -PREHOOK: Input: default@schema_evolution_data -PREHOOK: Output: default@part_change_lower_to_higher_numeric_group_tinyint_to_bigint@part=1 -POSTHOOK: query: insert into table part_change_lower_to_higher_numeric_group_tinyint_to_bigint partition(part=1) SELECT insert_num, - tinyint1, tinyint1, tinyint1, tinyint1, tinyint1, tinyint1, - smallint1, smallint1, smallint1, smallint1, smallint1, - int1, int1, int1, int1, - bigint1, bigint1, bigint1, - 'original' FROM schema_evolution_data -POSTHOOK: type: QUERY -POSTHOOK: Input: default@schema_evolution_data -POSTHOOK: Output: default@part_change_lower_to_higher_numeric_group_tinyint_to_bigint@part=1 -POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_tinyint_to_bigint PARTITION(part=1).b SIMPLE [] -POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_tinyint_to_bigint PARTITION(part=1).c1 SIMPLE 
[(schema_evolution_data)schema_evolution_data.FieldSchema(name:tinyint1, type:tinyint, comment:null), ] -POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_tinyint_to_bigint PARTITION(part=1).c10 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:smallint1, type:smallint, comment:null), ] -POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_tinyint_to_bigint PARTITION(part=1).c11 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:smallint1, type:smallint, comment:null), ] -POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_tinyint_to_bigint PARTITION(part=1).c12 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:int1, type:int, comment:null), ] -POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_tinyint_to_bigint PARTITION(part=1).c13 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:int1, type:int, comment:null), ] -POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_tinyint_to_bigint PARTITION(part=1).c14 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:int1, type:int, comment:null), ] -POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_tinyint_to_bigint PARTITION(part=1).c15 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:int1, type:int, comment:null), ] -POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_tinyint_to_bigint PARTITION(part=1).c16 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:bigint1, type:bigint, comment:null), ] -POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_tinyint_to_bigint PARTITION(part=1).c17 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:bigint1, type:bigint, comment:null), ] -POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_tinyint_to_bigint PARTITION(part=1).c18 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:bigint1, type:bigint, comment:null), ] -POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_tinyint_to_bigint PARTITION(part=1).c2 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:tinyint1, type:tinyint, comment:null), ] -POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_tinyint_to_bigint PARTITION(part=1).c3 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:tinyint1, type:tinyint, comment:null), ] -POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_tinyint_to_bigint PARTITION(part=1).c4 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:tinyint1, type:tinyint, comment:null), ] -POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_tinyint_to_bigint PARTITION(part=1).c5 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:tinyint1, type:tinyint, comment:null), ] -POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_tinyint_to_bigint PARTITION(part=1).c6 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:tinyint1, type:tinyint, comment:null), ] -POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_tinyint_to_bigint PARTITION(part=1).c7 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:smallint1, type:smallint, comment:null), ] -POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_tinyint_to_bigint PARTITION(part=1).c8 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:smallint1, type:smallint, comment:null), ] -POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_tinyint_to_bigint 
PARTITION(part=1).c9 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:smallint1, type:smallint, comment:null), ] -POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_tinyint_to_bigint PARTITION(part=1).insert_num SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:insert_num, type:int, comment:null), ] -insert_num tinyint1 tinyint1 tinyint1 tinyint1 tinyint1 tinyint1 smallint1 smallint1 smallint1 smallint1 smallint1 int1 int1 int1 int1 bigint1 bigint1 bigint1 _c19 -PREHOOK: query: explain vectorization only detail -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from part_change_lower_to_higher_numeric_group_tinyint_to_bigint -PREHOOK: type: QUERY -POSTHOOK: query: explain vectorization only detail -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from part_change_lower_to_higher_numeric_group_tinyint_to_bigint -POSTHOOK: type: QUERY -Explain -PLAN VECTORIZATION: - enabled: true - enabledConditionsMet: [hive.vectorized.execution.enabled IS true] - -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-1 - Tez - Vertices: - Map 1 - Map Operator Tree: - TableScan Vectorization: - native: true - vectorizationSchemaColumns: [0:insert_num:int, 1:c1:tinyint, 2:c2:tinyint, 3:c3:tinyint, 4:c4:tinyint, 5:c5:tinyint, 6:c6:tinyint, 7:c7:smallint, 8:c8:smallint, 9:c9:smallint, 10:c10:smallint, 11:c11:smallint, 12:c12:int, 13:c13:int, 14:c14:int, 15:c15:int, 16:c16:bigint, 17:c17:bigint, 18:c18:bigint, 19:b:string, 20:part:int, 21:ROW__ID:struct] - Select Vectorization: - className: VectorSelectOperator - native: true - projectedOutputColumnNums: [0, 20, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] - File Sink Vectorization: - className: VectorFileSinkOperator - native: false - Execution mode: vectorized, llap - Map Vectorization: - enabled: true - enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true - inputFormatFeatureSupport: [] - featureSupportInUse: [] - inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat - allNative: false - usesVectorUDFAdaptor: false - vectorized: true - rowBatchContext: - dataColumnCount: 20 - includeColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] - dataColumns: insert_num:int, c1:tinyint, c2:tinyint, c3:tinyint, c4:tinyint, c5:tinyint, c6:tinyint, c7:smallint, c8:smallint, c9:smallint, c10:smallint, c11:smallint, c12:int, c13:int, c14:int, c15:int, c16:bigint, c17:bigint, c18:bigint, b:string - partitionColumnCount: 1 - partitionColumns: part:int - scratchColumnTypeNames: [] - - Stage: Stage-0 - Fetch Operator - -PREHOOK: query: select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from part_change_lower_to_higher_numeric_group_tinyint_to_bigint -PREHOOK: type: QUERY -PREHOOK: Input: default@part_change_lower_to_higher_numeric_group_tinyint_to_bigint -PREHOOK: Input: default@part_change_lower_to_higher_numeric_group_tinyint_to_bigint@part=1 -#### A masked pattern was here #### -POSTHOOK: query: select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from part_change_lower_to_higher_numeric_group_tinyint_to_bigint -POSTHOOK: type: QUERY -POSTHOOK: Input: default@part_change_lower_to_higher_numeric_group_tinyint_to_bigint -POSTHOOK: Input: default@part_change_lower_to_higher_numeric_group_tinyint_to_bigint@part=1 -#### A masked pattern was 
here #### -insert_num part c1 c2 c3 c4 c5 c6 c7 c8 c9 c10 c11 c12 c13 c14 c15 c16 c17 c18 b -101 1 -128 -128 -128 -128 -128 -128 NULL NULL NULL NULL NULL -2147483648 -2147483648 -2147483648 -2147483648 NULL NULL NULL original -102 1 127 127 127 127 127 127 32767 32767 32767 32767 32767 2147483647 2147483647 2147483647 2147483647 9223372036854775807 9223372036854775807 9223372036854775807 original -103 1 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL original -104 1 23 23 23 23 23 23 834 834 834 834 834 203332 203332 203332 203332 888888857923222 888888857923222 888888857923222 original -105 1 -99 -99 -99 -99 -99 -99 -28300 -28300 -28300 -28300 -28300 -999992 -999992 -999992 -999992 -222282153733 -222282153733 -222282153733 original -PREHOOK: query: alter table part_change_lower_to_higher_numeric_group_tinyint_to_bigint replace columns (insert_num int, - c1 SMALLINT, c2 INT, c3 BIGINT, c4 decimal(38,18), c5 FLOAT, c6 DOUBLE, - c7 INT, c8 BIGINT, c9 decimal(38,18), c10 FLOAT, c11 DOUBLE, - c12 BIGINT, c13 decimal(38,18), c14 FLOAT, c15 DOUBLE, - c16 decimal(38,18), c17 FLOAT, c18 DOUBLE, - b STRING) -PREHOOK: type: ALTERTABLE_REPLACECOLS -PREHOOK: Input: default@part_change_lower_to_higher_numeric_group_tinyint_to_bigint -PREHOOK: Output: default@part_change_lower_to_higher_numeric_group_tinyint_to_bigint -POSTHOOK: query: alter table part_change_lower_to_higher_numeric_group_tinyint_to_bigint replace columns (insert_num int, - c1 SMALLINT, c2 INT, c3 BIGINT, c4 decimal(38,18), c5 FLOAT, c6 DOUBLE, - c7 INT, c8 BIGINT, c9 decimal(38,18), c10 FLOAT, c11 DOUBLE, - c12 BIGINT, c13 decimal(38,18), c14 FLOAT, c15 DOUBLE, - c16 decimal(38,18), c17 FLOAT, c18 DOUBLE, - b STRING) -POSTHOOK: type: ALTERTABLE_REPLACECOLS -POSTHOOK: Input: default@part_change_lower_to_higher_numeric_group_tinyint_to_bigint -POSTHOOK: Output: default@part_change_lower_to_higher_numeric_group_tinyint_to_bigint -PREHOOK: query: insert into table part_change_lower_to_higher_numeric_group_tinyint_to_bigint partition(part=1) VALUES (111, - 7000, 80000, 90000000, 1234.5678, 9876.543, 789.321, - 80000, 90000000, 1234.5678, 9876.543, 789.321, - 90000000, 1234.5678, 9876.543, 789.321, - 1234.5678, 9876.543, 789.321, - 'new') -PREHOOK: type: QUERY -PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@part_change_lower_to_higher_numeric_group_tinyint_to_bigint@part=1 -POSTHOOK: query: insert into table part_change_lower_to_higher_numeric_group_tinyint_to_bigint partition(part=1) VALUES (111, - 7000, 80000, 90000000, 1234.5678, 9876.543, 789.321, - 80000, 90000000, 1234.5678, 9876.543, 789.321, - 90000000, 1234.5678, 9876.543, 789.321, - 1234.5678, 9876.543, 789.321, - 'new') -POSTHOOK: type: QUERY -POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@part_change_lower_to_higher_numeric_group_tinyint_to_bigint@part=1 -POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_tinyint_to_bigint PARTITION(part=1).b SCRIPT [] -POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_tinyint_to_bigint PARTITION(part=1).c1 SCRIPT [] -POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_tinyint_to_bigint PARTITION(part=1).c10 SCRIPT [] -POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_tinyint_to_bigint PARTITION(part=1).c11 SCRIPT [] -POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_tinyint_to_bigint PARTITION(part=1).c12 SCRIPT [] -POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_tinyint_to_bigint 
PARTITION(part=1).c13 SCRIPT [] -POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_tinyint_to_bigint PARTITION(part=1).c14 SCRIPT [] -POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_tinyint_to_bigint PARTITION(part=1).c15 SCRIPT [] -POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_tinyint_to_bigint PARTITION(part=1).c16 SCRIPT [] -POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_tinyint_to_bigint PARTITION(part=1).c17 SCRIPT [] -POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_tinyint_to_bigint PARTITION(part=1).c18 SCRIPT [] -POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_tinyint_to_bigint PARTITION(part=1).c2 SCRIPT [] -POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_tinyint_to_bigint PARTITION(part=1).c3 SCRIPT [] -POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_tinyint_to_bigint PARTITION(part=1).c4 SCRIPT [] -POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_tinyint_to_bigint PARTITION(part=1).c5 SCRIPT [] -POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_tinyint_to_bigint PARTITION(part=1).c6 SCRIPT [] -POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_tinyint_to_bigint PARTITION(part=1).c7 SCRIPT [] -POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_tinyint_to_bigint PARTITION(part=1).c8 SCRIPT [] -POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_tinyint_to_bigint PARTITION(part=1).c9 SCRIPT [] -POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_tinyint_to_bigint PARTITION(part=1).insert_num SCRIPT [] -_col0 _col1 _col2 _col3 _col4 _col5 _col6 _col7 _col8 _col9 _col10 _col11 _col12 _col13 _col14 _col15 _col16 _col17 _col18 _col19 -PREHOOK: query: explain vectorization only detail -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from part_change_lower_to_higher_numeric_group_tinyint_to_bigint -PREHOOK: type: QUERY -POSTHOOK: query: explain vectorization only detail -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from part_change_lower_to_higher_numeric_group_tinyint_to_bigint -POSTHOOK: type: QUERY -Explain -PLAN VECTORIZATION: - enabled: true - enabledConditionsMet: [hive.vectorized.execution.enabled IS true] - -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-1 - Tez - Vertices: - Map 1 - Map Operator Tree: - TableScan Vectorization: - native: true - vectorizationSchemaColumns: [0:insert_num:int, 1:c1:smallint, 2:c2:int, 3:c3:bigint, 4:c4:decimal(38,18), 5:c5:float, 6:c6:double, 7:c7:int, 8:c8:bigint, 9:c9:decimal(38,18), 10:c10:float, 11:c11:double, 12:c12:bigint, 13:c13:decimal(38,18), 14:c14:float, 15:c15:double, 16:c16:decimal(38,18), 17:c17:float, 18:c18:double, 19:b:string, 20:part:int, 21:ROW__ID:struct] - Select Vectorization: - className: VectorSelectOperator - native: true - projectedOutputColumnNums: [0, 20, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] - File Sink Vectorization: - className: VectorFileSinkOperator - native: false - Execution mode: vectorized, llap - Map Vectorization: - enabled: true - enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true - inputFormatFeatureSupport: [] - featureSupportInUse: [] - inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat - allNative: false - usesVectorUDFAdaptor: false - vectorized: true - rowBatchContext: - dataColumnCount: 20 - includeColumns: [0, 1, 2, 3, 4, 5, 
6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] - dataColumns: insert_num:int, c1:smallint, c2:int, c3:bigint, c4:decimal(38,18), c5:float, c6:double, c7:int, c8:bigint, c9:decimal(38,18), c10:float, c11:double, c12:bigint, c13:decimal(38,18), c14:float, c15:double, c16:decimal(38,18), c17:float, c18:double, b:string - partitionColumnCount: 1 - partitionColumns: part:int - scratchColumnTypeNames: [] - - Stage: Stage-0 - Fetch Operator - -PREHOOK: query: select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from part_change_lower_to_higher_numeric_group_tinyint_to_bigint -PREHOOK: type: QUERY -PREHOOK: Input: default@part_change_lower_to_higher_numeric_group_tinyint_to_bigint -PREHOOK: Input: default@part_change_lower_to_higher_numeric_group_tinyint_to_bigint@part=1 -#### A masked pattern was here #### -POSTHOOK: query: select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from part_change_lower_to_higher_numeric_group_tinyint_to_bigint -POSTHOOK: type: QUERY -POSTHOOK: Input: default@part_change_lower_to_higher_numeric_group_tinyint_to_bigint -POSTHOOK: Input: default@part_change_lower_to_higher_numeric_group_tinyint_to_bigint@part=1 -#### A masked pattern was here #### -insert_num part c1 c2 c3 c4 c5 c6 c7 c8 c9 c10 c11 c12 c13 c14 c15 c16 c17 c18 b -101 1 -128 -128 -128 -128.000000000000000000 -128.0 -128.0 NULL NULL NULL NULL NULL -2147483648 -2147483648.000000000000000000 -2.14748365E9 -2.147483648E9 NULL NULL NULL original -102 1 127 127 127 127.000000000000000000 127.0 127.0 32767 32767 32767.000000000000000000 32767.0 32767.0 2147483647 2147483647.000000000000000000 2.14748365E9 2.147483647E9 9223372036854775807.000000000000000000 9.223372E18 9.223372036854776E18 original -103 1 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL original -104 1 23 23 23 23.000000000000000000 23.0 23.0 834 834 834.000000000000000000 834.0 834.0 203332 203332.000000000000000000 203332.0 203332.0 888888857923222.000000000000000000 8.8888885E14 8.88888857923222E14 original -105 1 -99 -99 -99 -99.000000000000000000 -99.0 -99.0 -28300 -28300 -28300.000000000000000000 -28300.0 -28300.0 -999992 -999992.000000000000000000 -999992.0 -999992.0 -222282153733.000000000000000000 -2.22282154E11 -2.22282153733E11 original -111 1 7000 80000 90000000 1234.567800000000000000 9876.543 789.321 80000 90000000 1234.567800000000000000 9876.543 789.321 90000000 1234.567800000000000000 9876.543 789.321 1234.567800000000000000 9876.543 789.321 new -PREHOOK: query: drop table part_change_lower_to_higher_numeric_group_tinyint_to_bigint -PREHOOK: type: DROPTABLE -PREHOOK: Input: default@part_change_lower_to_higher_numeric_group_tinyint_to_bigint -PREHOOK: Output: default@part_change_lower_to_higher_numeric_group_tinyint_to_bigint -POSTHOOK: query: drop table part_change_lower_to_higher_numeric_group_tinyint_to_bigint -POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@part_change_lower_to_higher_numeric_group_tinyint_to_bigint -POSTHOOK: Output: default@part_change_lower_to_higher_numeric_group_tinyint_to_bigint -PREHOOK: query: CREATE TABLE part_change_lower_to_higher_numeric_group_decimal_to_float(insert_num int, - c1 decimal(38,18), c2 decimal(38,18), - c3 float, - b STRING) PARTITIONED BY(part INT) clustered by (c1) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true') -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: 
default@part_change_lower_to_higher_numeric_group_decimal_to_float -POSTHOOK: query: CREATE TABLE part_change_lower_to_higher_numeric_group_decimal_to_float(insert_num int, - c1 decimal(38,18), c2 decimal(38,18), - c3 float, - b STRING) PARTITIONED BY(part INT) clustered by (c1) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true') -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@part_change_lower_to_higher_numeric_group_decimal_to_float -PREHOOK: query: insert into table part_change_lower_to_higher_numeric_group_decimal_to_float partition(part=1) SELECT insert_num, - decimal1, decimal1, - float1, - 'original' FROM schema_evolution_data -PREHOOK: type: QUERY -PREHOOK: Input: default@schema_evolution_data -PREHOOK: Output: default@part_change_lower_to_higher_numeric_group_decimal_to_float@part=1 -POSTHOOK: query: insert into table part_change_lower_to_higher_numeric_group_decimal_to_float partition(part=1) SELECT insert_num, - decimal1, decimal1, - float1, - 'original' FROM schema_evolution_data -POSTHOOK: type: QUERY -POSTHOOK: Input: default@schema_evolution_data -POSTHOOK: Output: default@part_change_lower_to_higher_numeric_group_decimal_to_float@part=1 -POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_decimal_to_float PARTITION(part=1).b SIMPLE [] -POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_decimal_to_float PARTITION(part=1).c1 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:decimal1, type:decimal(38,18), comment:null), ] -POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_decimal_to_float PARTITION(part=1).c2 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:decimal1, type:decimal(38,18), comment:null), ] -POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_decimal_to_float PARTITION(part=1).c3 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:float1, type:float, comment:null), ] -POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_decimal_to_float PARTITION(part=1).insert_num SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:insert_num, type:int, comment:null), ] -insert_num decimal1 decimal1 float1 _c4 -PREHOOK: query: explain vectorization only detail -select insert_num,part,c1,c2,c3,b from part_change_lower_to_higher_numeric_group_decimal_to_float -PREHOOK: type: QUERY -POSTHOOK: query: explain vectorization only detail -select insert_num,part,c1,c2,c3,b from part_change_lower_to_higher_numeric_group_decimal_to_float -POSTHOOK: type: QUERY -Explain -PLAN VECTORIZATION: - enabled: true - enabledConditionsMet: [hive.vectorized.execution.enabled IS true] - -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-1 - Tez - Vertices: - Map 1 - Map Operator Tree: - TableScan Vectorization: - native: true - vectorizationSchemaColumns: [0:insert_num:int, 1:c1:decimal(38,18), 2:c2:decimal(38,18), 3:c3:float, 4:b:string, 5:part:int, 6:ROW__ID:struct] - Select Vectorization: - className: VectorSelectOperator - native: true - projectedOutputColumnNums: [0, 5, 1, 2, 3, 4] - File Sink Vectorization: - className: VectorFileSinkOperator - native: false - Execution mode: vectorized, llap - Map Vectorization: - enabled: true - enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true - inputFormatFeatureSupport: [] - featureSupportInUse: [] - inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat - allNative: false - 
usesVectorUDFAdaptor: false - vectorized: true - rowBatchContext: - dataColumnCount: 5 - includeColumns: [0, 1, 2, 3, 4] - dataColumns: insert_num:int, c1:decimal(38,18), c2:decimal(38,18), c3:float, b:string - partitionColumnCount: 1 - partitionColumns: part:int - scratchColumnTypeNames: [] - - Stage: Stage-0 - Fetch Operator - -PREHOOK: query: select insert_num,part,c1,c2,c3,b from part_change_lower_to_higher_numeric_group_decimal_to_float -PREHOOK: type: QUERY -PREHOOK: Input: default@part_change_lower_to_higher_numeric_group_decimal_to_float -PREHOOK: Input: default@part_change_lower_to_higher_numeric_group_decimal_to_float@part=1 -#### A masked pattern was here #### -POSTHOOK: query: select insert_num,part,c1,c2,c3,b from part_change_lower_to_higher_numeric_group_decimal_to_float -POSTHOOK: type: QUERY -POSTHOOK: Input: default@part_change_lower_to_higher_numeric_group_decimal_to_float -POSTHOOK: Input: default@part_change_lower_to_higher_numeric_group_decimal_to_float@part=1 -#### A masked pattern was here #### -insert_num part c1 c2 c3 b -101 1 99999999999999999999.999999999999999999 99999999999999999999.999999999999999999 Infinity original -102 1 -99999999999999999999.999999999999999999 -99999999999999999999.999999999999999999 -Infinity original -103 1 NULL NULL NULL original -104 1 66475.561431000000000000 66475.561431000000000000 -100.35978 original -105 1 9250340.750000000000000000 9250340.750000000000000000 NULL original -PREHOOK: query: alter table part_change_lower_to_higher_numeric_group_decimal_to_float replace columns (insert_num int, c1 float, c2 double, c3 DOUBLE, b STRING) -PREHOOK: type: ALTERTABLE_REPLACECOLS -PREHOOK: Input: default@part_change_lower_to_higher_numeric_group_decimal_to_float -PREHOOK: Output: default@part_change_lower_to_higher_numeric_group_decimal_to_float -POSTHOOK: query: alter table part_change_lower_to_higher_numeric_group_decimal_to_float replace columns (insert_num int, c1 float, c2 double, c3 DOUBLE, b STRING) -POSTHOOK: type: ALTERTABLE_REPLACECOLS -POSTHOOK: Input: default@part_change_lower_to_higher_numeric_group_decimal_to_float -POSTHOOK: Output: default@part_change_lower_to_higher_numeric_group_decimal_to_float -PREHOOK: query: insert into table part_change_lower_to_higher_numeric_group_decimal_to_float partition(part=1) VALUES (111, 1234.5678, 9876.543, 1234.5678, 'new') -PREHOOK: type: QUERY -PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@part_change_lower_to_higher_numeric_group_decimal_to_float@part=1 -POSTHOOK: query: insert into table part_change_lower_to_higher_numeric_group_decimal_to_float partition(part=1) VALUES (111, 1234.5678, 9876.543, 1234.5678, 'new') -POSTHOOK: type: QUERY -POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@part_change_lower_to_higher_numeric_group_decimal_to_float@part=1 -POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_decimal_to_float PARTITION(part=1).b SCRIPT [] -POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_decimal_to_float PARTITION(part=1).c1 SCRIPT [] -POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_decimal_to_float PARTITION(part=1).c2 SCRIPT [] -POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_decimal_to_float PARTITION(part=1).c3 SCRIPT [] -POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_decimal_to_float PARTITION(part=1).insert_num SCRIPT [] -_col0 _col1 _col2 _col3 _col4 -PREHOOK: query: explain vectorization only detail -select insert_num,part,c1,c2,c3,b from 
part_change_lower_to_higher_numeric_group_decimal_to_float -PREHOOK: type: QUERY -POSTHOOK: query: explain vectorization only detail -select insert_num,part,c1,c2,c3,b from part_change_lower_to_higher_numeric_group_decimal_to_float -POSTHOOK: type: QUERY -Explain -PLAN VECTORIZATION: - enabled: true - enabledConditionsMet: [hive.vectorized.execution.enabled IS true] - -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-1 - Tez - Vertices: - Map 1 - Map Operator Tree: - TableScan Vectorization: - native: true - vectorizationSchemaColumns: [0:insert_num:int, 1:c1:float, 2:c2:double, 3:c3:double, 4:b:string, 5:part:int, 6:ROW__ID:struct] - Select Vectorization: - className: VectorSelectOperator - native: true - projectedOutputColumnNums: [0, 5, 1, 2, 3, 4] - File Sink Vectorization: - className: VectorFileSinkOperator - native: false - Execution mode: vectorized, llap - Map Vectorization: - enabled: true - enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true - inputFormatFeatureSupport: [] - featureSupportInUse: [] - inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat - allNative: false - usesVectorUDFAdaptor: false - vectorized: true - rowBatchContext: - dataColumnCount: 5 - includeColumns: [0, 1, 2, 3, 4] - dataColumns: insert_num:int, c1:float, c2:double, c3:double, b:string - partitionColumnCount: 1 - partitionColumns: part:int - scratchColumnTypeNames: [] - - Stage: Stage-0 - Fetch Operator - -PREHOOK: query: select insert_num,part,c1,c2,c3,b from part_change_lower_to_higher_numeric_group_decimal_to_float -PREHOOK: type: QUERY -PREHOOK: Input: default@part_change_lower_to_higher_numeric_group_decimal_to_float -PREHOOK: Input: default@part_change_lower_to_higher_numeric_group_decimal_to_float@part=1 -#### A masked pattern was here #### -POSTHOOK: query: select insert_num,part,c1,c2,c3,b from part_change_lower_to_higher_numeric_group_decimal_to_float -POSTHOOK: type: QUERY -POSTHOOK: Input: default@part_change_lower_to_higher_numeric_group_decimal_to_float -POSTHOOK: Input: default@part_change_lower_to_higher_numeric_group_decimal_to_float@part=1 -#### A masked pattern was here #### -insert_num part c1 c2 c3 b -101 1 1.0E20 1.0E20 Infinity original -102 1 -1.0E20 -1.0E20 -Infinity original -103 1 NULL NULL NULL original -104 1 66475.56 66475.561431 -100.35978 original -105 1 9250341.0 9250340.75 NULL original -111 1 1234.5677 9876.543 1234.5678 new -PREHOOK: query: drop table part_change_lower_to_higher_numeric_group_decimal_to_float -PREHOOK: type: DROPTABLE -PREHOOK: Input: default@part_change_lower_to_higher_numeric_group_decimal_to_float -PREHOOK: Output: default@part_change_lower_to_higher_numeric_group_decimal_to_float -POSTHOOK: query: drop table part_change_lower_to_higher_numeric_group_decimal_to_float -POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@part_change_lower_to_higher_numeric_group_decimal_to_float -POSTHOOK: Output: default@part_change_lower_to_higher_numeric_group_decimal_to_float diff --git ql/src/test/results/clientpositive/llap/schema_evol_orc_vec_part_llap_io.q.out ql/src/test/results/clientpositive/llap/schema_evol_orc_vec_part_llap_io.q.out deleted file mode 100644 index bc652d271a..0000000000 --- ql/src/test/results/clientpositive/llap/schema_evol_orc_vec_part_llap_io.q.out +++ /dev/null @@ -1,1708 +0,0 @@ -PREHOOK: query: CREATE TABLE schema_evolution_data(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 
decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string) -row format delimited fields terminated by '|' stored as textfile -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@schema_evolution_data -POSTHOOK: query: CREATE TABLE schema_evolution_data(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string) -row format delimited fields terminated by '|' stored as textfile -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@schema_evolution_data -PREHOOK: query: load data local inpath '../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into table schema_evolution_data -PREHOOK: type: LOAD -#### A masked pattern was here #### -PREHOOK: Output: default@schema_evolution_data -POSTHOOK: query: load data local inpath '../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into table schema_evolution_data -POSTHOOK: type: LOAD -#### A masked pattern was here #### -POSTHOOK: Output: default@schema_evolution_data -PREHOOK: query: CREATE TABLE part_add_int_permute_select(insert_num int, a INT, b STRING) PARTITIONED BY(part INT) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@part_add_int_permute_select -POSTHOOK: query: CREATE TABLE part_add_int_permute_select(insert_num int, a INT, b STRING) PARTITIONED BY(part INT) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@part_add_int_permute_select -PREHOOK: query: insert into table part_add_int_permute_select partition(part=1) VALUES (1, 1111, 'new') -PREHOOK: type: QUERY -PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@part_add_int_permute_select@part=1 -POSTHOOK: query: insert into table part_add_int_permute_select partition(part=1) VALUES (1, 1111, 'new') -POSTHOOK: type: QUERY -POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@part_add_int_permute_select@part=1 -POSTHOOK: Lineage: part_add_int_permute_select PARTITION(part=1).a SCRIPT [] -POSTHOOK: Lineage: part_add_int_permute_select PARTITION(part=1).b SCRIPT [] -POSTHOOK: Lineage: part_add_int_permute_select PARTITION(part=1).insert_num SCRIPT [] -col1 col2 col3 -PREHOOK: query: alter table part_add_int_permute_select add columns(c int) -PREHOOK: type: ALTERTABLE_ADDCOLS -PREHOOK: Input: default@part_add_int_permute_select -PREHOOK: Output: default@part_add_int_permute_select -POSTHOOK: query: alter table part_add_int_permute_select add columns(c int) -POSTHOOK: type: ALTERTABLE_ADDCOLS -POSTHOOK: Input: default@part_add_int_permute_select -POSTHOOK: Output: default@part_add_int_permute_select -PREHOOK: query: insert into table part_add_int_permute_select partition(part=1) VALUES (2, 2222, 'new', 3333) -PREHOOK: type: QUERY -PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@part_add_int_permute_select@part=1 -POSTHOOK: 
query: insert into table part_add_int_permute_select partition(part=1) VALUES (2, 2222, 'new', 3333) -POSTHOOK: type: QUERY -POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@part_add_int_permute_select@part=1 -POSTHOOK: Lineage: part_add_int_permute_select PARTITION(part=1).a SCRIPT [] -POSTHOOK: Lineage: part_add_int_permute_select PARTITION(part=1).b SCRIPT [] -POSTHOOK: Lineage: part_add_int_permute_select PARTITION(part=1).c SCRIPT [] -POSTHOOK: Lineage: part_add_int_permute_select PARTITION(part=1).insert_num SCRIPT [] -col1 col2 col3 col4 -PREHOOK: query: explain vectorization detail -select insert_num,part,a,b from part_add_int_permute_select -PREHOOK: type: QUERY -POSTHOOK: query: explain vectorization detail -select insert_num,part,a,b from part_add_int_permute_select -POSTHOOK: type: QUERY -Explain -PLAN VECTORIZATION: - enabled: true - enabledConditionsMet: [hive.vectorized.execution.enabled IS true] - -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-1 - Tez -#### A masked pattern was here #### - Vertices: - Map 1 - Map Operator Tree: - TableScan - alias: part_add_int_permute_select - Statistics: Num rows: 2 Data size: 198 Basic stats: COMPLETE Column stats: COMPLETE - TableScan Vectorization: - native: true - vectorizationSchemaColumns: [0:insert_num:int, 1:a:int, 2:b:string, 3:c:int, 4:part:int, 5:ROW__ID:struct] - Select Operator - expressions: insert_num (type: int), part (type: int), a (type: int), b (type: string) - outputColumnNames: _col0, _col1, _col2, _col3 - Select Vectorization: - className: VectorSelectOperator - native: true - projectedOutputColumnNums: [0, 4, 1, 2] - Statistics: Num rows: 2 Data size: 198 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - File Sink Vectorization: - className: VectorFileSinkOperator - native: false - Statistics: Num rows: 2 Data size: 198 Basic stats: COMPLETE Column stats: COMPLETE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - Execution mode: vectorized, llap - LLAP IO: all inputs - Map Vectorization: - enabled: true - enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true - inputFormatFeatureSupport: [] - featureSupportInUse: [] - inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat - allNative: false - usesVectorUDFAdaptor: false - vectorized: true - rowBatchContext: - dataColumnCount: 4 - includeColumns: [0, 1, 2] - dataColumns: insert_num:int, a:int, b:string, c:int - partitionColumnCount: 1 - partitionColumns: part:int - scratchColumnTypeNames: [] - - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - ListSink - -PREHOOK: query: select insert_num,part,a,b from part_add_int_permute_select -PREHOOK: type: QUERY -PREHOOK: Input: default@part_add_int_permute_select -PREHOOK: Input: default@part_add_int_permute_select@part=1 -#### A masked pattern was here #### -POSTHOOK: query: select insert_num,part,a,b from part_add_int_permute_select -POSTHOOK: type: QUERY -POSTHOOK: Input: default@part_add_int_permute_select -POSTHOOK: Input: default@part_add_int_permute_select@part=1 -#### A masked pattern was here #### -insert_num part a b -1 1 1111 new -2 1 2222 new -PREHOOK: query: select insert_num,part,a,b,c from part_add_int_permute_select -PREHOOK: type: QUERY -PREHOOK: Input: 
default@part_add_int_permute_select -PREHOOK: Input: default@part_add_int_permute_select@part=1 -#### A masked pattern was here #### -POSTHOOK: query: select insert_num,part,a,b,c from part_add_int_permute_select -POSTHOOK: type: QUERY -POSTHOOK: Input: default@part_add_int_permute_select -POSTHOOK: Input: default@part_add_int_permute_select@part=1 -#### A masked pattern was here #### -insert_num part a b c -1 1 1111 new NULL -2 1 2222 new 3333 -PREHOOK: query: select insert_num,part,c from part_add_int_permute_select -PREHOOK: type: QUERY -PREHOOK: Input: default@part_add_int_permute_select -PREHOOK: Input: default@part_add_int_permute_select@part=1 -#### A masked pattern was here #### -POSTHOOK: query: select insert_num,part,c from part_add_int_permute_select -POSTHOOK: type: QUERY -POSTHOOK: Input: default@part_add_int_permute_select -POSTHOOK: Input: default@part_add_int_permute_select@part=1 -#### A masked pattern was here #### -insert_num part c -1 1 NULL -2 1 3333 -PREHOOK: query: drop table part_add_int_permute_select -PREHOOK: type: DROPTABLE -PREHOOK: Input: default@part_add_int_permute_select -PREHOOK: Output: default@part_add_int_permute_select -POSTHOOK: query: drop table part_add_int_permute_select -POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@part_add_int_permute_select -POSTHOOK: Output: default@part_add_int_permute_select -PREHOOK: query: CREATE TABLE part_add_int_string_permute_select(insert_num int, a INT, b STRING) PARTITIONED BY(part INT) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@part_add_int_string_permute_select -POSTHOOK: query: CREATE TABLE part_add_int_string_permute_select(insert_num int, a INT, b STRING) PARTITIONED BY(part INT) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@part_add_int_string_permute_select -PREHOOK: query: insert into table part_add_int_string_permute_select partition(part=1) VALUES (1, 1111, 'new') -PREHOOK: type: QUERY -PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@part_add_int_string_permute_select@part=1 -POSTHOOK: query: insert into table part_add_int_string_permute_select partition(part=1) VALUES (1, 1111, 'new') -POSTHOOK: type: QUERY -POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@part_add_int_string_permute_select@part=1 -POSTHOOK: Lineage: part_add_int_string_permute_select PARTITION(part=1).a SCRIPT [] -POSTHOOK: Lineage: part_add_int_string_permute_select PARTITION(part=1).b SCRIPT [] -POSTHOOK: Lineage: part_add_int_string_permute_select PARTITION(part=1).insert_num SCRIPT [] -col1 col2 col3 -PREHOOK: query: alter table part_add_int_string_permute_select add columns(c int, d string) -PREHOOK: type: ALTERTABLE_ADDCOLS -PREHOOK: Input: default@part_add_int_string_permute_select -PREHOOK: Output: default@part_add_int_string_permute_select -POSTHOOK: query: alter table part_add_int_string_permute_select add columns(c int, d string) -POSTHOOK: type: ALTERTABLE_ADDCOLS -POSTHOOK: Input: default@part_add_int_string_permute_select -POSTHOOK: Output: default@part_add_int_string_permute_select -PREHOOK: query: insert into table part_add_int_string_permute_select partition(part=1) VALUES (2, 2222, 'new', 3333, '4444') -PREHOOK: type: QUERY -PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@part_add_int_string_permute_select@part=1 -POSTHOOK: query: insert into table part_add_int_string_permute_select partition(part=1) VALUES (2, 2222, 'new', 3333, '4444') -POSTHOOK: type: 
QUERY -POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@part_add_int_string_permute_select@part=1 -POSTHOOK: Lineage: part_add_int_string_permute_select PARTITION(part=1).a SCRIPT [] -POSTHOOK: Lineage: part_add_int_string_permute_select PARTITION(part=1).b SCRIPT [] -POSTHOOK: Lineage: part_add_int_string_permute_select PARTITION(part=1).c SCRIPT [] -POSTHOOK: Lineage: part_add_int_string_permute_select PARTITION(part=1).d SCRIPT [] -POSTHOOK: Lineage: part_add_int_string_permute_select PARTITION(part=1).insert_num SCRIPT [] -col1 col2 col3 col4 col5 -PREHOOK: query: explain vectorization detail -select insert_num,part,a,b from part_add_int_string_permute_select -PREHOOK: type: QUERY -POSTHOOK: query: explain vectorization detail -select insert_num,part,a,b from part_add_int_string_permute_select -POSTHOOK: type: QUERY -Explain -PLAN VECTORIZATION: - enabled: true - enabledConditionsMet: [hive.vectorized.execution.enabled IS true] - -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-1 - Tez -#### A masked pattern was here #### - Vertices: - Map 1 - Map Operator Tree: - TableScan - alias: part_add_int_string_permute_select - Statistics: Num rows: 2 Data size: 198 Basic stats: COMPLETE Column stats: COMPLETE - TableScan Vectorization: - native: true - vectorizationSchemaColumns: [0:insert_num:int, 1:a:int, 2:b:string, 3:c:int, 4:d:string, 5:part:int, 6:ROW__ID:struct] - Select Operator - expressions: insert_num (type: int), part (type: int), a (type: int), b (type: string) - outputColumnNames: _col0, _col1, _col2, _col3 - Select Vectorization: - className: VectorSelectOperator - native: true - projectedOutputColumnNums: [0, 5, 1, 2] - Statistics: Num rows: 2 Data size: 198 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - File Sink Vectorization: - className: VectorFileSinkOperator - native: false - Statistics: Num rows: 2 Data size: 198 Basic stats: COMPLETE Column stats: COMPLETE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - Execution mode: vectorized, llap - LLAP IO: all inputs - Map Vectorization: - enabled: true - enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true - inputFormatFeatureSupport: [] - featureSupportInUse: [] - inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat - allNative: false - usesVectorUDFAdaptor: false - vectorized: true - rowBatchContext: - dataColumnCount: 5 - includeColumns: [0, 1, 2] - dataColumns: insert_num:int, a:int, b:string, c:int, d:string - partitionColumnCount: 1 - partitionColumns: part:int - scratchColumnTypeNames: [] - - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - ListSink - -PREHOOK: query: select insert_num,part,a,b from part_add_int_string_permute_select -PREHOOK: type: QUERY -PREHOOK: Input: default@part_add_int_string_permute_select -PREHOOK: Input: default@part_add_int_string_permute_select@part=1 -#### A masked pattern was here #### -POSTHOOK: query: select insert_num,part,a,b from part_add_int_string_permute_select -POSTHOOK: type: QUERY -POSTHOOK: Input: default@part_add_int_string_permute_select -POSTHOOK: Input: default@part_add_int_string_permute_select@part=1 -#### A masked pattern was here #### -insert_num part a b -1 1 1111 new -2 1 2222 new -PREHOOK: query: select insert_num,part,a,b,c from 
part_add_int_string_permute_select -PREHOOK: type: QUERY -PREHOOK: Input: default@part_add_int_string_permute_select -PREHOOK: Input: default@part_add_int_string_permute_select@part=1 -#### A masked pattern was here #### -POSTHOOK: query: select insert_num,part,a,b,c from part_add_int_string_permute_select -POSTHOOK: type: QUERY -POSTHOOK: Input: default@part_add_int_string_permute_select -POSTHOOK: Input: default@part_add_int_string_permute_select@part=1 -#### A masked pattern was here #### -insert_num part a b c -1 1 1111 new NULL -2 1 2222 new 3333 -PREHOOK: query: select insert_num,part,a,b,c,d from part_add_int_string_permute_select -PREHOOK: type: QUERY -PREHOOK: Input: default@part_add_int_string_permute_select -PREHOOK: Input: default@part_add_int_string_permute_select@part=1 -#### A masked pattern was here #### -POSTHOOK: query: select insert_num,part,a,b,c,d from part_add_int_string_permute_select -POSTHOOK: type: QUERY -POSTHOOK: Input: default@part_add_int_string_permute_select -POSTHOOK: Input: default@part_add_int_string_permute_select@part=1 -#### A masked pattern was here #### -insert_num part a b c d -1 1 1111 new NULL NULL -2 1 2222 new 3333 4444 -PREHOOK: query: select insert_num,part,a,c,d from part_add_int_string_permute_select -PREHOOK: type: QUERY -PREHOOK: Input: default@part_add_int_string_permute_select -PREHOOK: Input: default@part_add_int_string_permute_select@part=1 -#### A masked pattern was here #### -POSTHOOK: query: select insert_num,part,a,c,d from part_add_int_string_permute_select -POSTHOOK: type: QUERY -POSTHOOK: Input: default@part_add_int_string_permute_select -POSTHOOK: Input: default@part_add_int_string_permute_select@part=1 -#### A masked pattern was here #### -insert_num part a c d -1 1 1111 NULL NULL -2 1 2222 3333 4444 -PREHOOK: query: select insert_num,part,a,d from part_add_int_string_permute_select -PREHOOK: type: QUERY -PREHOOK: Input: default@part_add_int_string_permute_select -PREHOOK: Input: default@part_add_int_string_permute_select@part=1 -#### A masked pattern was here #### -POSTHOOK: query: select insert_num,part,a,d from part_add_int_string_permute_select -POSTHOOK: type: QUERY -POSTHOOK: Input: default@part_add_int_string_permute_select -POSTHOOK: Input: default@part_add_int_string_permute_select@part=1 -#### A masked pattern was here #### -insert_num part a d -1 1 1111 NULL -2 1 2222 4444 -PREHOOK: query: select insert_num,part,c from part_add_int_string_permute_select -PREHOOK: type: QUERY -PREHOOK: Input: default@part_add_int_string_permute_select -PREHOOK: Input: default@part_add_int_string_permute_select@part=1 -#### A masked pattern was here #### -POSTHOOK: query: select insert_num,part,c from part_add_int_string_permute_select -POSTHOOK: type: QUERY -POSTHOOK: Input: default@part_add_int_string_permute_select -POSTHOOK: Input: default@part_add_int_string_permute_select@part=1 -#### A masked pattern was here #### -insert_num part c -1 1 NULL -2 1 3333 -PREHOOK: query: select insert_num,part,d from part_add_int_string_permute_select -PREHOOK: type: QUERY -PREHOOK: Input: default@part_add_int_string_permute_select -PREHOOK: Input: default@part_add_int_string_permute_select@part=1 -#### A masked pattern was here #### -POSTHOOK: query: select insert_num,part,d from part_add_int_string_permute_select -POSTHOOK: type: QUERY -POSTHOOK: Input: default@part_add_int_string_permute_select -POSTHOOK: Input: default@part_add_int_string_permute_select@part=1 -#### A masked pattern was here #### -insert_num part d -1 1 NULL -2 1 4444 
-PREHOOK: query: drop table part_add_int_string_permute_select -PREHOOK: type: DROPTABLE -PREHOOK: Input: default@part_add_int_string_permute_select -PREHOOK: Output: default@part_add_int_string_permute_select -POSTHOOK: query: drop table part_add_int_string_permute_select -POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@part_add_int_string_permute_select -POSTHOOK: Output: default@part_add_int_string_permute_select -PREHOOK: query: CREATE TABLE part_change_string_group_double(insert_num int, c1 STRING, c2 CHAR(50), c3 VARCHAR(50), b STRING) PARTITIONED BY(part INT) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@part_change_string_group_double -POSTHOOK: query: CREATE TABLE part_change_string_group_double(insert_num int, c1 STRING, c2 CHAR(50), c3 VARCHAR(50), b STRING) PARTITIONED BY(part INT) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@part_change_string_group_double -PREHOOK: query: insert into table part_change_string_group_double partition(part=1) SELECT insert_num, double_str, double_str, double_str, 'original' FROM schema_evolution_data -PREHOOK: type: QUERY -PREHOOK: Input: default@schema_evolution_data -PREHOOK: Output: default@part_change_string_group_double@part=1 -POSTHOOK: query: insert into table part_change_string_group_double partition(part=1) SELECT insert_num, double_str, double_str, double_str, 'original' FROM schema_evolution_data -POSTHOOK: type: QUERY -POSTHOOK: Input: default@schema_evolution_data -POSTHOOK: Output: default@part_change_string_group_double@part=1 -POSTHOOK: Lineage: part_change_string_group_double PARTITION(part=1).b SIMPLE [] -POSTHOOK: Lineage: part_change_string_group_double PARTITION(part=1).c1 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:double_str, type:string, comment:null), ] -POSTHOOK: Lineage: part_change_string_group_double PARTITION(part=1).c2 EXPRESSION [(schema_evolution_data)schema_evolution_data.FieldSchema(name:double_str, type:string, comment:null), ] -POSTHOOK: Lineage: part_change_string_group_double PARTITION(part=1).c3 EXPRESSION [(schema_evolution_data)schema_evolution_data.FieldSchema(name:double_str, type:string, comment:null), ] -POSTHOOK: Lineage: part_change_string_group_double PARTITION(part=1).insert_num SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:insert_num, type:int, comment:null), ] -_col0 _col1 _col2 _col3 _col4 -PREHOOK: query: alter table part_change_string_group_double replace columns (insert_num int, c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, b STRING) -PREHOOK: type: ALTERTABLE_REPLACECOLS -PREHOOK: Input: default@part_change_string_group_double -PREHOOK: Output: default@part_change_string_group_double -POSTHOOK: query: alter table part_change_string_group_double replace columns (insert_num int, c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, b STRING) -POSTHOOK: type: ALTERTABLE_REPLACECOLS -POSTHOOK: Input: default@part_change_string_group_double -POSTHOOK: Output: default@part_change_string_group_double -PREHOOK: query: insert into table part_change_string_group_double partition(part=1) SELECT insert_num, double1, double1, double1, 'new' FROM schema_evolution_data WHERE insert_num = 111 -PREHOOK: type: QUERY -PREHOOK: Input: default@schema_evolution_data -PREHOOK: Output: default@part_change_string_group_double@part=1 -POSTHOOK: query: insert into table part_change_string_group_double partition(part=1) SELECT insert_num, double1, double1, double1, 'new' FROM schema_evolution_data WHERE 
insert_num = 111 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@schema_evolution_data -POSTHOOK: Output: default@part_change_string_group_double@part=1 -POSTHOOK: Lineage: part_change_string_group_double PARTITION(part=1).b SIMPLE [] -POSTHOOK: Lineage: part_change_string_group_double PARTITION(part=1).c1 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:double1, type:double, comment:null), ] -POSTHOOK: Lineage: part_change_string_group_double PARTITION(part=1).c2 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:double1, type:double, comment:null), ] -POSTHOOK: Lineage: part_change_string_group_double PARTITION(part=1).c3 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:double1, type:double, comment:null), ] -POSTHOOK: Lineage: part_change_string_group_double PARTITION(part=1).insert_num SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:insert_num, type:int, comment:null), ] -insert_num double1 double1 double1 _c4 -PREHOOK: query: explain vectorization detail -select insert_num,part,c1,c2,c3,b from part_change_string_group_double -PREHOOK: type: QUERY -POSTHOOK: query: explain vectorization detail -select insert_num,part,c1,c2,c3,b from part_change_string_group_double -POSTHOOK: type: QUERY -Explain -PLAN VECTORIZATION: - enabled: true - enabledConditionsMet: [hive.vectorized.execution.enabled IS true] - -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-1 - Tez -#### A masked pattern was here #### - Vertices: - Map 1 - Map Operator Tree: - TableScan - alias: part_change_string_group_double - Statistics: Num rows: 5 Data size: 500 Basic stats: COMPLETE Column stats: PARTIAL - TableScan Vectorization: - native: true - vectorizationSchemaColumns: [0:insert_num:int, 1:c1:double, 2:c2:double, 3:c3:double, 4:b:string, 5:part:int, 6:ROW__ID:struct] - Select Operator - expressions: insert_num (type: int), part (type: int), c1 (type: double), c2 (type: double), c3 (type: double), b (type: string) - outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 - Select Vectorization: - className: VectorSelectOperator - native: true - projectedOutputColumnNums: [0, 5, 1, 2, 3, 4] - Statistics: Num rows: 5 Data size: 500 Basic stats: COMPLETE Column stats: PARTIAL - File Output Operator - compressed: false - File Sink Vectorization: - className: VectorFileSinkOperator - native: false - Statistics: Num rows: 5 Data size: 500 Basic stats: COMPLETE Column stats: PARTIAL - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - Execution mode: vectorized, llap - LLAP IO: all inputs - Map Vectorization: - enabled: true - enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true - inputFormatFeatureSupport: [] - featureSupportInUse: [] - inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat - allNative: false - usesVectorUDFAdaptor: false - vectorized: true - rowBatchContext: - dataColumnCount: 5 - includeColumns: [0, 1, 2, 3, 4] - dataColumns: insert_num:int, c1:double, c2:double, c3:double, b:string - partitionColumnCount: 1 - partitionColumns: part:int - scratchColumnTypeNames: [] - - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - ListSink - -PREHOOK: query: select insert_num,part,c1,c2,c3,b from part_change_string_group_double -PREHOOK: type: QUERY -PREHOOK: Input: 
default@part_change_string_group_double -PREHOOK: Input: default@part_change_string_group_double@part=1 -#### A masked pattern was here #### -POSTHOOK: query: select insert_num,part,c1,c2,c3,b from part_change_string_group_double -POSTHOOK: type: QUERY -POSTHOOK: Input: default@part_change_string_group_double -POSTHOOK: Input: default@part_change_string_group_double@part=1 -#### A masked pattern was here #### -insert_num part c1 c2 c3 b -101 1 1.7976931348623157E308 1.7976931348623157E308 1.7976931348623157E308 original -102 1 -1.7976931348623157E308 -1.7976931348623157E308 -1.7976931348623157E308 original -103 1 NULL NULL NULL original -104 1 30.774 30.774 30.774 original -105 1 46114.28 46114.28 46114.28 original -PREHOOK: query: drop table part_change_string_group_double -PREHOOK: type: DROPTABLE -PREHOOK: Input: default@part_change_string_group_double -PREHOOK: Output: default@part_change_string_group_double -POSTHOOK: query: drop table part_change_string_group_double -POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@part_change_string_group_double -POSTHOOK: Output: default@part_change_string_group_double -PREHOOK: query: CREATE TABLE part_change_date_group_string_group_date_timestamp(insert_num int, c1 DATE, c2 DATE, c3 DATE, c4 DATE, c5 DATE, c6 TIMESTAMP, c7 TIMESTAMP, c8 TIMESTAMP, c9 TIMESTAMP, c10 TIMESTAMP, b STRING) PARTITIONED BY(part INT) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@part_change_date_group_string_group_date_timestamp -POSTHOOK: query: CREATE TABLE part_change_date_group_string_group_date_timestamp(insert_num int, c1 DATE, c2 DATE, c3 DATE, c4 DATE, c5 DATE, c6 TIMESTAMP, c7 TIMESTAMP, c8 TIMESTAMP, c9 TIMESTAMP, c10 TIMESTAMP, b STRING) PARTITIONED BY(part INT) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@part_change_date_group_string_group_date_timestamp -PREHOOK: query: insert into table part_change_date_group_string_group_date_timestamp partition(part=1) SELECT insert_num, date1, date1, date1, date1, date1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, 'original' FROM schema_evolution_data -PREHOOK: type: QUERY -PREHOOK: Input: default@schema_evolution_data -PREHOOK: Output: default@part_change_date_group_string_group_date_timestamp@part=1 -POSTHOOK: query: insert into table part_change_date_group_string_group_date_timestamp partition(part=1) SELECT insert_num, date1, date1, date1, date1, date1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, 'original' FROM schema_evolution_data -POSTHOOK: type: QUERY -POSTHOOK: Input: default@schema_evolution_data -POSTHOOK: Output: default@part_change_date_group_string_group_date_timestamp@part=1 -POSTHOOK: Lineage: part_change_date_group_string_group_date_timestamp PARTITION(part=1).b SIMPLE [] -POSTHOOK: Lineage: part_change_date_group_string_group_date_timestamp PARTITION(part=1).c1 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:date1, type:date, comment:null), ] -POSTHOOK: Lineage: part_change_date_group_string_group_date_timestamp PARTITION(part=1).c10 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:timestamp1, type:timestamp, comment:null), ] -POSTHOOK: Lineage: part_change_date_group_string_group_date_timestamp PARTITION(part=1).c2 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:date1, type:date, comment:null), ] -POSTHOOK: Lineage: part_change_date_group_string_group_date_timestamp PARTITION(part=1).c3 SIMPLE 
[(schema_evolution_data)schema_evolution_data.FieldSchema(name:date1, type:date, comment:null), ] -POSTHOOK: Lineage: part_change_date_group_string_group_date_timestamp PARTITION(part=1).c4 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:date1, type:date, comment:null), ] -POSTHOOK: Lineage: part_change_date_group_string_group_date_timestamp PARTITION(part=1).c5 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:date1, type:date, comment:null), ] -POSTHOOK: Lineage: part_change_date_group_string_group_date_timestamp PARTITION(part=1).c6 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:timestamp1, type:timestamp, comment:null), ] -POSTHOOK: Lineage: part_change_date_group_string_group_date_timestamp PARTITION(part=1).c7 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:timestamp1, type:timestamp, comment:null), ] -POSTHOOK: Lineage: part_change_date_group_string_group_date_timestamp PARTITION(part=1).c8 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:timestamp1, type:timestamp, comment:null), ] -POSTHOOK: Lineage: part_change_date_group_string_group_date_timestamp PARTITION(part=1).c9 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:timestamp1, type:timestamp, comment:null), ] -POSTHOOK: Lineage: part_change_date_group_string_group_date_timestamp PARTITION(part=1).insert_num SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:insert_num, type:int, comment:null), ] -insert_num date1 date1 date1 date1 date1 timestamp1 timestamp1 timestamp1 timestamp1 timestamp1 _c11 -PREHOOK: query: alter table part_change_date_group_string_group_date_timestamp replace columns(insert_num int, c1 STRING, c2 CHAR(50), c3 CHAR(15), c4 VARCHAR(50), c5 VARCHAR(15), c6 STRING, c7 CHAR(50), c8 CHAR(15), c9 VARCHAR(50), c10 VARCHAR(15), b STRING) -PREHOOK: type: ALTERTABLE_REPLACECOLS -PREHOOK: Input: default@part_change_date_group_string_group_date_timestamp -PREHOOK: Output: default@part_change_date_group_string_group_date_timestamp -POSTHOOK: query: alter table part_change_date_group_string_group_date_timestamp replace columns(insert_num int, c1 STRING, c2 CHAR(50), c3 CHAR(15), c4 VARCHAR(50), c5 VARCHAR(15), c6 STRING, c7 CHAR(50), c8 CHAR(15), c9 VARCHAR(50), c10 VARCHAR(15), b STRING) -POSTHOOK: type: ALTERTABLE_REPLACECOLS -POSTHOOK: Input: default@part_change_date_group_string_group_date_timestamp -POSTHOOK: Output: default@part_change_date_group_string_group_date_timestamp -PREHOOK: query: insert into table part_change_date_group_string_group_date_timestamp partition(part=1) VALUES (111, 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'new') -PREHOOK: type: QUERY -PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@part_change_date_group_string_group_date_timestamp@part=1 -POSTHOOK: query: insert into table part_change_date_group_string_group_date_timestamp partition(part=1) VALUES (111, 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'new') -POSTHOOK: type: QUERY -POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@part_change_date_group_string_group_date_timestamp@part=1 -POSTHOOK: Lineage: part_change_date_group_string_group_date_timestamp PARTITION(part=1).b SCRIPT [] -POSTHOOK: Lineage: part_change_date_group_string_group_date_timestamp PARTITION(part=1).c1 SCRIPT [] -POSTHOOK: Lineage: 
part_change_date_group_string_group_date_timestamp PARTITION(part=1).c10 SCRIPT [] -POSTHOOK: Lineage: part_change_date_group_string_group_date_timestamp PARTITION(part=1).c2 SCRIPT [] -POSTHOOK: Lineage: part_change_date_group_string_group_date_timestamp PARTITION(part=1).c3 SCRIPT [] -POSTHOOK: Lineage: part_change_date_group_string_group_date_timestamp PARTITION(part=1).c4 SCRIPT [] -POSTHOOK: Lineage: part_change_date_group_string_group_date_timestamp PARTITION(part=1).c5 SCRIPT [] -POSTHOOK: Lineage: part_change_date_group_string_group_date_timestamp PARTITION(part=1).c6 SCRIPT [] -POSTHOOK: Lineage: part_change_date_group_string_group_date_timestamp PARTITION(part=1).c7 SCRIPT [] -POSTHOOK: Lineage: part_change_date_group_string_group_date_timestamp PARTITION(part=1).c8 SCRIPT [] -POSTHOOK: Lineage: part_change_date_group_string_group_date_timestamp PARTITION(part=1).c9 SCRIPT [] -POSTHOOK: Lineage: part_change_date_group_string_group_date_timestamp PARTITION(part=1).insert_num SCRIPT [] -_col0 _col1 _col2 _col3 _col4 _col5 _col6 _col7 _col8 _col9 _col10 _col11 -PREHOOK: query: explain vectorization detail -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_date_group_string_group_date_timestamp -PREHOOK: type: QUERY -POSTHOOK: query: explain vectorization detail -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_date_group_string_group_date_timestamp -POSTHOOK: type: QUERY -Explain -PLAN VECTORIZATION: - enabled: true - enabledConditionsMet: [hive.vectorized.execution.enabled IS true] - -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-1 - Tez -#### A masked pattern was here #### - Vertices: - Map 1 - Map Operator Tree: - TableScan - alias: part_change_date_group_string_group_date_timestamp - Statistics: Num rows: 6 Data size: 12449 Basic stats: COMPLETE Column stats: PARTIAL - TableScan Vectorization: - native: true - vectorizationSchemaColumns: [0:insert_num:int, 1:c1:string, 2:c2:char(50), 3:c3:char(15), 4:c4:varchar(50), 5:c5:varchar(15), 6:c6:string, 7:c7:char(50), 8:c8:char(15), 9:c9:varchar(50), 10:c10:varchar(15), 11:b:string, 12:part:int, 13:ROW__ID:struct] - Select Operator - expressions: insert_num (type: int), part (type: int), c1 (type: string), c2 (type: char(50)), c3 (type: char(15)), c4 (type: varchar(50)), c5 (type: varchar(15)), c6 (type: string), c7 (type: char(50)), c8 (type: char(15)), c9 (type: varchar(50)), c10 (type: varchar(15)), b (type: string) - outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12 - Select Vectorization: - className: VectorSelectOperator - native: true - projectedOutputColumnNums: [0, 12, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11] - Statistics: Num rows: 6 Data size: 8952 Basic stats: COMPLETE Column stats: PARTIAL - File Output Operator - compressed: false - File Sink Vectorization: - className: VectorFileSinkOperator - native: false - Statistics: Num rows: 6 Data size: 8952 Basic stats: COMPLETE Column stats: PARTIAL - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - Execution mode: vectorized, llap - LLAP IO: all inputs - Map Vectorization: - enabled: true - enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true - inputFormatFeatureSupport: [] - featureSupportInUse: [] - inputFileFormats: 
org.apache.hadoop.hive.ql.io.orc.OrcInputFormat - allNative: false - usesVectorUDFAdaptor: false - vectorized: true - rowBatchContext: - dataColumnCount: 12 - includeColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11] - dataColumns: insert_num:int, c1:string, c2:char(50), c3:char(15), c4:varchar(50), c5:varchar(15), c6:string, c7:char(50), c8:char(15), c9:varchar(50), c10:varchar(15), b:string - partitionColumnCount: 1 - partitionColumns: part:int - scratchColumnTypeNames: [] - - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - ListSink - -PREHOOK: query: select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_date_group_string_group_date_timestamp -PREHOOK: type: QUERY -PREHOOK: Input: default@part_change_date_group_string_group_date_timestamp -PREHOOK: Input: default@part_change_date_group_string_group_date_timestamp@part=1 -#### A masked pattern was here #### -POSTHOOK: query: select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_date_group_string_group_date_timestamp -POSTHOOK: type: QUERY -POSTHOOK: Input: default@part_change_date_group_string_group_date_timestamp -POSTHOOK: Input: default@part_change_date_group_string_group_date_timestamp@part=1 -#### A masked pattern was here #### -insert_num part c1 c2 c3 c4 c5 c6 c7 c8 c9 c10 b -101 1 1950-12-18 1950-12-18 1950-12-18 1950-12-18 1950-12-18 6229-06-28 02:54:28.970117179 6229-06-28 02:54:28.970117179 6229-06-28 02:5 6229-06-28 02:54:28.970117179 6229-06-28 02:5 original -102 1 2049-12-18 2049-12-18 2049-12-18 2049-12-18 2049-12-18 5966-07-09 03:30:50.597 5966-07-09 03:30:50.597 5966-07-09 03:3 5966-07-09 03:30:50.597 5966-07-09 03:3 original -103 1 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL original -104 1 2021-09-24 2021-09-24 2021-09-24 2021-09-24 2021-09-24 1978-08-02 06:34:14.0 1978-08-02 06:34:14.0 1978-08-02 06:3 1978-08-02 06:34:14.0 1978-08-02 06:3 original -105 1 2024-11-11 2024-11-11 2024-11-11 2024-11-11 2024-11-11 1991-01-06 16:20:39.72036854 1991-01-06 16:20:39.72036854 1991-01-06 16:2 1991-01-06 16:20:39.72036854 1991-01-06 16:2 original -111 1 filler filler filler filler filler filler filler filler filler filler new -PREHOOK: query: drop table part_change_date_group_string_group_date_timestamp -PREHOOK: type: DROPTABLE -PREHOOK: Input: default@part_change_date_group_string_group_date_timestamp -PREHOOK: Output: default@part_change_date_group_string_group_date_timestamp -POSTHOOK: query: drop table part_change_date_group_string_group_date_timestamp -POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@part_change_date_group_string_group_date_timestamp -POSTHOOK: Output: default@part_change_date_group_string_group_date_timestamp -PREHOOK: query: CREATE TABLE part_change_numeric_group_string_group_multi_ints_string_group(insert_num int, - c1 tinyint, c2 smallint, c3 int, c4 bigint, - c5 tinyint, c6 smallint, c7 int, c8 bigint, c9 tinyint, c10 smallint, c11 int, c12 bigint, - c13 tinyint, c14 smallint, c15 int, c16 bigint, c17 tinyint, c18 smallint, c19 int, c20 bigint, - b STRING) PARTITIONED BY(part INT) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@part_change_numeric_group_string_group_multi_ints_string_group -POSTHOOK: query: CREATE TABLE part_change_numeric_group_string_group_multi_ints_string_group(insert_num int, - c1 tinyint, c2 smallint, c3 int, c4 bigint, - c5 tinyint, c6 smallint, c7 int, c8 bigint, c9 tinyint, c10 smallint, c11 int, c12 bigint, - c13 tinyint, c14 smallint, c15 int, c16 bigint, c17 tinyint, c18 
smallint, c19 int, c20 bigint, - b STRING) PARTITIONED BY(part INT) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@part_change_numeric_group_string_group_multi_ints_string_group -PREHOOK: query: insert into table part_change_numeric_group_string_group_multi_ints_string_group partition(part=1) SELECT insert_num, - tinyint1, smallint1, int1, bigint1, - tinyint1, smallint1, int1, bigint1, tinyint1, smallint1, int1, bigint1, - tinyint1, smallint1, int1, bigint1, tinyint1, smallint1, int1, bigint1, - 'original' FROM schema_evolution_data -PREHOOK: type: QUERY -PREHOOK: Input: default@schema_evolution_data -PREHOOK: Output: default@part_change_numeric_group_string_group_multi_ints_string_group@part=1 -POSTHOOK: query: insert into table part_change_numeric_group_string_group_multi_ints_string_group partition(part=1) SELECT insert_num, - tinyint1, smallint1, int1, bigint1, - tinyint1, smallint1, int1, bigint1, tinyint1, smallint1, int1, bigint1, - tinyint1, smallint1, int1, bigint1, tinyint1, smallint1, int1, bigint1, - 'original' FROM schema_evolution_data -POSTHOOK: type: QUERY -POSTHOOK: Input: default@schema_evolution_data -POSTHOOK: Output: default@part_change_numeric_group_string_group_multi_ints_string_group@part=1 -POSTHOOK: Lineage: part_change_numeric_group_string_group_multi_ints_string_group PARTITION(part=1).b SIMPLE [] -POSTHOOK: Lineage: part_change_numeric_group_string_group_multi_ints_string_group PARTITION(part=1).c1 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:tinyint1, type:tinyint, comment:null), ] -POSTHOOK: Lineage: part_change_numeric_group_string_group_multi_ints_string_group PARTITION(part=1).c10 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:smallint1, type:smallint, comment:null), ] -POSTHOOK: Lineage: part_change_numeric_group_string_group_multi_ints_string_group PARTITION(part=1).c11 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:int1, type:int, comment:null), ] -POSTHOOK: Lineage: part_change_numeric_group_string_group_multi_ints_string_group PARTITION(part=1).c12 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:bigint1, type:bigint, comment:null), ] -POSTHOOK: Lineage: part_change_numeric_group_string_group_multi_ints_string_group PARTITION(part=1).c13 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:tinyint1, type:tinyint, comment:null), ] -POSTHOOK: Lineage: part_change_numeric_group_string_group_multi_ints_string_group PARTITION(part=1).c14 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:smallint1, type:smallint, comment:null), ] -POSTHOOK: Lineage: part_change_numeric_group_string_group_multi_ints_string_group PARTITION(part=1).c15 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:int1, type:int, comment:null), ] -POSTHOOK: Lineage: part_change_numeric_group_string_group_multi_ints_string_group PARTITION(part=1).c16 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:bigint1, type:bigint, comment:null), ] -POSTHOOK: Lineage: part_change_numeric_group_string_group_multi_ints_string_group PARTITION(part=1).c17 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:tinyint1, type:tinyint, comment:null), ] -POSTHOOK: Lineage: part_change_numeric_group_string_group_multi_ints_string_group PARTITION(part=1).c18 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:smallint1, type:smallint, comment:null), ] -POSTHOOK: 
Lineage: part_change_numeric_group_string_group_multi_ints_string_group PARTITION(part=1).c19 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:int1, type:int, comment:null), ] -POSTHOOK: Lineage: part_change_numeric_group_string_group_multi_ints_string_group PARTITION(part=1).c2 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:smallint1, type:smallint, comment:null), ] -POSTHOOK: Lineage: part_change_numeric_group_string_group_multi_ints_string_group PARTITION(part=1).c20 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:bigint1, type:bigint, comment:null), ] -POSTHOOK: Lineage: part_change_numeric_group_string_group_multi_ints_string_group PARTITION(part=1).c3 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:int1, type:int, comment:null), ] -POSTHOOK: Lineage: part_change_numeric_group_string_group_multi_ints_string_group PARTITION(part=1).c4 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:bigint1, type:bigint, comment:null), ] -POSTHOOK: Lineage: part_change_numeric_group_string_group_multi_ints_string_group PARTITION(part=1).c5 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:tinyint1, type:tinyint, comment:null), ] -POSTHOOK: Lineage: part_change_numeric_group_string_group_multi_ints_string_group PARTITION(part=1).c6 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:smallint1, type:smallint, comment:null), ] -POSTHOOK: Lineage: part_change_numeric_group_string_group_multi_ints_string_group PARTITION(part=1).c7 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:int1, type:int, comment:null), ] -POSTHOOK: Lineage: part_change_numeric_group_string_group_multi_ints_string_group PARTITION(part=1).c8 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:bigint1, type:bigint, comment:null), ] -POSTHOOK: Lineage: part_change_numeric_group_string_group_multi_ints_string_group PARTITION(part=1).c9 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:tinyint1, type:tinyint, comment:null), ] -POSTHOOK: Lineage: part_change_numeric_group_string_group_multi_ints_string_group PARTITION(part=1).insert_num SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:insert_num, type:int, comment:null), ] -insert_num tinyint1 smallint1 int1 bigint1 tinyint1 smallint1 int1 bigint1 tinyint1 smallint1 int1 bigint1 tinyint1 smallint1 int1 bigint1 tinyint1 smallint1 int1 bigint1 _c21 -PREHOOK: query: select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from part_change_numeric_group_string_group_multi_ints_string_group -PREHOOK: type: QUERY -PREHOOK: Input: default@part_change_numeric_group_string_group_multi_ints_string_group -PREHOOK: Input: default@part_change_numeric_group_string_group_multi_ints_string_group@part=1 -#### A masked pattern was here #### -POSTHOOK: query: select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from part_change_numeric_group_string_group_multi_ints_string_group -POSTHOOK: type: QUERY -POSTHOOK: Input: default@part_change_numeric_group_string_group_multi_ints_string_group -POSTHOOK: Input: default@part_change_numeric_group_string_group_multi_ints_string_group@part=1 -#### A masked pattern was here #### -insert_num part c1 c2 c3 c4 c5 c6 c7 c8 c9 c10 c11 c12 c13 c14 c15 c16 c17 c18 c19 c20 b -101 1 -128 NULL -2147483648 NULL -128 NULL -2147483648 NULL -128 NULL -2147483648 NULL -128 NULL 
-2147483648 NULL -128 NULL -2147483648 NULL original -102 1 127 32767 2147483647 9223372036854775807 127 32767 2147483647 9223372036854775807 127 32767 2147483647 9223372036854775807 127 32767 2147483647 9223372036854775807 127 32767 2147483647 9223372036854775807 original -103 1 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL original -104 1 23 834 203332 888888857923222 23 834 203332 888888857923222 23 834 203332 888888857923222 23 834 203332 888888857923222 23 834 203332 888888857923222 original -105 1 -99 -28300 -999992 -222282153733 -99 -28300 -999992 -222282153733 -99 -28300 -999992 -222282153733 -99 -28300 -999992 -222282153733 -99 -28300 -999992 -222282153733 original -PREHOOK: query: alter table part_change_numeric_group_string_group_multi_ints_string_group replace columns (insert_num int, - c1 STRING, c2 STRING, c3 STRING, c4 STRING, - c5 CHAR(50), c6 CHAR(50), c7 CHAR(50), c8 CHAR(50), c9 CHAR(5), c10 CHAR(5), c11 CHAR(5), c12 CHAR(5), - c13 VARCHAR(50), c14 VARCHAR(50), c15 VARCHAR(50), c16 VARCHAR(50), c17 VARCHAR(5), c18 VARCHAR(5), c19 VARCHAR(5), c20 VARCHAR(5), - b STRING) -PREHOOK: type: ALTERTABLE_REPLACECOLS -PREHOOK: Input: default@part_change_numeric_group_string_group_multi_ints_string_group -PREHOOK: Output: default@part_change_numeric_group_string_group_multi_ints_string_group -POSTHOOK: query: alter table part_change_numeric_group_string_group_multi_ints_string_group replace columns (insert_num int, - c1 STRING, c2 STRING, c3 STRING, c4 STRING, - c5 CHAR(50), c6 CHAR(50), c7 CHAR(50), c8 CHAR(50), c9 CHAR(5), c10 CHAR(5), c11 CHAR(5), c12 CHAR(5), - c13 VARCHAR(50), c14 VARCHAR(50), c15 VARCHAR(50), c16 VARCHAR(50), c17 VARCHAR(5), c18 VARCHAR(5), c19 VARCHAR(5), c20 VARCHAR(5), - b STRING) -POSTHOOK: type: ALTERTABLE_REPLACECOLS -POSTHOOK: Input: default@part_change_numeric_group_string_group_multi_ints_string_group -POSTHOOK: Output: default@part_change_numeric_group_string_group_multi_ints_string_group -PREHOOK: query: insert into table part_change_numeric_group_string_group_multi_ints_string_group partition(part=1) VALUES (111, - 'filler', 'filler', 'filler', 'filler', - 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', - 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', - 'new') -PREHOOK: type: QUERY -PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@part_change_numeric_group_string_group_multi_ints_string_group@part=1 -POSTHOOK: query: insert into table part_change_numeric_group_string_group_multi_ints_string_group partition(part=1) VALUES (111, - 'filler', 'filler', 'filler', 'filler', - 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', - 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', - 'new') -POSTHOOK: type: QUERY -POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@part_change_numeric_group_string_group_multi_ints_string_group@part=1 -POSTHOOK: Lineage: part_change_numeric_group_string_group_multi_ints_string_group PARTITION(part=1).b SCRIPT [] -POSTHOOK: Lineage: part_change_numeric_group_string_group_multi_ints_string_group PARTITION(part=1).c1 SCRIPT [] -POSTHOOK: Lineage: part_change_numeric_group_string_group_multi_ints_string_group PARTITION(part=1).c10 SCRIPT [] -POSTHOOK: Lineage: part_change_numeric_group_string_group_multi_ints_string_group PARTITION(part=1).c11 SCRIPT [] -POSTHOOK: Lineage: 
part_change_numeric_group_string_group_multi_ints_string_group PARTITION(part=1).c12 SCRIPT [] -POSTHOOK: Lineage: part_change_numeric_group_string_group_multi_ints_string_group PARTITION(part=1).c13 SCRIPT [] -POSTHOOK: Lineage: part_change_numeric_group_string_group_multi_ints_string_group PARTITION(part=1).c14 SCRIPT [] -POSTHOOK: Lineage: part_change_numeric_group_string_group_multi_ints_string_group PARTITION(part=1).c15 SCRIPT [] -POSTHOOK: Lineage: part_change_numeric_group_string_group_multi_ints_string_group PARTITION(part=1).c16 SCRIPT [] -POSTHOOK: Lineage: part_change_numeric_group_string_group_multi_ints_string_group PARTITION(part=1).c17 SCRIPT [] -POSTHOOK: Lineage: part_change_numeric_group_string_group_multi_ints_string_group PARTITION(part=1).c18 SCRIPT [] -POSTHOOK: Lineage: part_change_numeric_group_string_group_multi_ints_string_group PARTITION(part=1).c19 SCRIPT [] -POSTHOOK: Lineage: part_change_numeric_group_string_group_multi_ints_string_group PARTITION(part=1).c2 SCRIPT [] -POSTHOOK: Lineage: part_change_numeric_group_string_group_multi_ints_string_group PARTITION(part=1).c20 SCRIPT [] -POSTHOOK: Lineage: part_change_numeric_group_string_group_multi_ints_string_group PARTITION(part=1).c3 SCRIPT [] -POSTHOOK: Lineage: part_change_numeric_group_string_group_multi_ints_string_group PARTITION(part=1).c4 SCRIPT [] -POSTHOOK: Lineage: part_change_numeric_group_string_group_multi_ints_string_group PARTITION(part=1).c5 SCRIPT [] -POSTHOOK: Lineage: part_change_numeric_group_string_group_multi_ints_string_group PARTITION(part=1).c6 SCRIPT [] -POSTHOOK: Lineage: part_change_numeric_group_string_group_multi_ints_string_group PARTITION(part=1).c7 SCRIPT [] -POSTHOOK: Lineage: part_change_numeric_group_string_group_multi_ints_string_group PARTITION(part=1).c8 SCRIPT [] -POSTHOOK: Lineage: part_change_numeric_group_string_group_multi_ints_string_group PARTITION(part=1).c9 SCRIPT [] -POSTHOOK: Lineage: part_change_numeric_group_string_group_multi_ints_string_group PARTITION(part=1).insert_num SCRIPT [] -_col0 _col1 _col2 _col3 _col4 _col5 _col6 _col7 _col8 _col9 _col10 _col11 _col12 _col13 _col14 _col15 _col16 _col17 _col18 _col19 _col20 _col21 -PREHOOK: query: explain vectorization detail -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from part_change_numeric_group_string_group_multi_ints_string_group -PREHOOK: type: QUERY -POSTHOOK: query: explain vectorization detail -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from part_change_numeric_group_string_group_multi_ints_string_group -POSTHOOK: type: QUERY -Explain -PLAN VECTORIZATION: - enabled: true - enabledConditionsMet: [hive.vectorized.execution.enabled IS true] - -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-1 - Tez -#### A masked pattern was here #### - Vertices: - Map 1 - Map Operator Tree: - TableScan - alias: part_change_numeric_group_string_group_multi_ints_string_group - Statistics: Num rows: 6 Data size: 1140 Basic stats: COMPLETE Column stats: COMPLETE - TableScan Vectorization: - native: true - vectorizationSchemaColumns: [0:insert_num:int, 1:c1:string, 2:c2:string, 3:c3:string, 4:c4:string, 5:c5:char(50), 6:c6:char(50), 7:c7:char(50), 8:c8:char(50), 9:c9:char(5), 10:c10:char(5), 11:c11:char(5), 12:c12:char(5), 13:c13:varchar(50), 14:c14:varchar(50), 15:c15:varchar(50), 16:c16:varchar(50), 17:c17:varchar(5), 18:c18:varchar(5), 19:c19:varchar(5), 
20:c20:varchar(5), 21:b:string, 22:part:int, 23:ROW__ID:struct] - Select Operator - expressions: insert_num (type: int), part (type: int), c1 (type: string), c2 (type: string), c3 (type: string), c4 (type: string), c5 (type: char(50)), c6 (type: char(50)), c7 (type: char(50)), c8 (type: char(50)), c9 (type: char(5)), c10 (type: char(5)), c11 (type: char(5)), c12 (type: char(5)), c13 (type: varchar(50)), c14 (type: varchar(50)), c15 (type: varchar(50)), c16 (type: varchar(50)), c17 (type: varchar(5)), c18 (type: varchar(5)), c19 (type: varchar(5)), c20 (type: varchar(5)), b (type: string) - outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22 - Select Vectorization: - className: VectorSelectOperator - native: true - projectedOutputColumnNums: [0, 22, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21] - Statistics: Num rows: 6 Data size: 1140 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - File Sink Vectorization: - className: VectorFileSinkOperator - native: false - Statistics: Num rows: 6 Data size: 1140 Basic stats: COMPLETE Column stats: COMPLETE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - Execution mode: vectorized, llap - LLAP IO: all inputs - Map Vectorization: - enabled: true - enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true - inputFormatFeatureSupport: [] - featureSupportInUse: [] - inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat - allNative: false - usesVectorUDFAdaptor: false - vectorized: true - rowBatchContext: - dataColumnCount: 22 - includeColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21] - dataColumns: insert_num:int, c1:string, c2:string, c3:string, c4:string, c5:char(50), c6:char(50), c7:char(50), c8:char(50), c9:char(5), c10:char(5), c11:char(5), c12:char(5), c13:varchar(50), c14:varchar(50), c15:varchar(50), c16:varchar(50), c17:varchar(5), c18:varchar(5), c19:varchar(5), c20:varchar(5), b:string - partitionColumnCount: 1 - partitionColumns: part:int - scratchColumnTypeNames: [] - - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - ListSink - -PREHOOK: query: select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from part_change_numeric_group_string_group_multi_ints_string_group -PREHOOK: type: QUERY -PREHOOK: Input: default@part_change_numeric_group_string_group_multi_ints_string_group -PREHOOK: Input: default@part_change_numeric_group_string_group_multi_ints_string_group@part=1 -#### A masked pattern was here #### -POSTHOOK: query: select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from part_change_numeric_group_string_group_multi_ints_string_group -POSTHOOK: type: QUERY -POSTHOOK: Input: default@part_change_numeric_group_string_group_multi_ints_string_group -POSTHOOK: Input: default@part_change_numeric_group_string_group_multi_ints_string_group@part=1 -#### A masked pattern was here #### -insert_num part c1 c2 c3 c4 c5 c6 c7 c8 c9 c10 c11 c12 c13 c14 c15 c16 c17 c18 c19 c20 b -101 1 -128 NULL -2147483648 NULL -128 NULL -2147483648 NULL -128 NULL -2147 NULL -128 NULL -2147483648 NULL -128 NULL -2147 NULL original -102 1 127 
32767 2147483647 9223372036854775807 127 32767 2147483647 9223372036854775807 127 32767 21474 92233 127 32767 2147483647 9223372036854775807 127 32767 21474 92233 original -103 1 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL original -104 1 23 834 203332 888888857923222 23 834 203332 888888857923222 23 834 20333 88888 23 834 203332 888888857923222 23 834 20333 88888 original -105 1 -99 -28300 -999992 -222282153733 -99 -28300 -999992 -222282153733 -99 -2830 -9999 -2222 -99 -28300 -999992 -222282153733 -99 -2830 -9999 -2222 original -111 1 filler filler filler filler filler filler filler filler fille fille fille fille filler filler filler filler fille fille fille fille new -PREHOOK: query: drop table part_change_numeric_group_string_group_multi_ints_string_group -PREHOOK: type: DROPTABLE -PREHOOK: Input: default@part_change_numeric_group_string_group_multi_ints_string_group -PREHOOK: Output: default@part_change_numeric_group_string_group_multi_ints_string_group -POSTHOOK: query: drop table part_change_numeric_group_string_group_multi_ints_string_group -POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@part_change_numeric_group_string_group_multi_ints_string_group -POSTHOOK: Output: default@part_change_numeric_group_string_group_multi_ints_string_group -PREHOOK: query: CREATE TABLE part_change_numeric_group_string_group_floating_string_group(insert_num int, - c1 decimal(38,18), c2 float, c3 double, - c4 decimal(38,18), c5 float, c6 double, c7 decimal(38,18), c8 float, c9 double, - c10 decimal(38,18), c11 float, c12 double, c13 decimal(38,18), c14 float, c15 double, - b STRING) PARTITIONED BY(part INT) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@part_change_numeric_group_string_group_floating_string_group -POSTHOOK: query: CREATE TABLE part_change_numeric_group_string_group_floating_string_group(insert_num int, - c1 decimal(38,18), c2 float, c3 double, - c4 decimal(38,18), c5 float, c6 double, c7 decimal(38,18), c8 float, c9 double, - c10 decimal(38,18), c11 float, c12 double, c13 decimal(38,18), c14 float, c15 double, - b STRING) PARTITIONED BY(part INT) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@part_change_numeric_group_string_group_floating_string_group -PREHOOK: query: insert into table part_change_numeric_group_string_group_floating_string_group partition(part=1) SELECT insert_num, - decimal1, float1, double1, - decimal1, float1, double1, decimal1, float1, double1, - decimal1, float1, double1, decimal1, float1, double1, - 'original' FROM schema_evolution_data -PREHOOK: type: QUERY -PREHOOK: Input: default@schema_evolution_data -PREHOOK: Output: default@part_change_numeric_group_string_group_floating_string_group@part=1 -POSTHOOK: query: insert into table part_change_numeric_group_string_group_floating_string_group partition(part=1) SELECT insert_num, - decimal1, float1, double1, - decimal1, float1, double1, decimal1, float1, double1, - decimal1, float1, double1, decimal1, float1, double1, - 'original' FROM schema_evolution_data -POSTHOOK: type: QUERY -POSTHOOK: Input: default@schema_evolution_data -POSTHOOK: Output: default@part_change_numeric_group_string_group_floating_string_group@part=1 -POSTHOOK: Lineage: part_change_numeric_group_string_group_floating_string_group PARTITION(part=1).b SIMPLE [] -POSTHOOK: Lineage: part_change_numeric_group_string_group_floating_string_group PARTITION(part=1).c1 SIMPLE 
[(schema_evolution_data)schema_evolution_data.FieldSchema(name:decimal1, type:decimal(38,18), comment:null), ] -POSTHOOK: Lineage: part_change_numeric_group_string_group_floating_string_group PARTITION(part=1).c10 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:decimal1, type:decimal(38,18), comment:null), ] -POSTHOOK: Lineage: part_change_numeric_group_string_group_floating_string_group PARTITION(part=1).c11 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:float1, type:float, comment:null), ] -POSTHOOK: Lineage: part_change_numeric_group_string_group_floating_string_group PARTITION(part=1).c12 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:double1, type:double, comment:null), ] -POSTHOOK: Lineage: part_change_numeric_group_string_group_floating_string_group PARTITION(part=1).c13 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:decimal1, type:decimal(38,18), comment:null), ] -POSTHOOK: Lineage: part_change_numeric_group_string_group_floating_string_group PARTITION(part=1).c14 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:float1, type:float, comment:null), ] -POSTHOOK: Lineage: part_change_numeric_group_string_group_floating_string_group PARTITION(part=1).c15 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:double1, type:double, comment:null), ] -POSTHOOK: Lineage: part_change_numeric_group_string_group_floating_string_group PARTITION(part=1).c2 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:float1, type:float, comment:null), ] -POSTHOOK: Lineage: part_change_numeric_group_string_group_floating_string_group PARTITION(part=1).c3 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:double1, type:double, comment:null), ] -POSTHOOK: Lineage: part_change_numeric_group_string_group_floating_string_group PARTITION(part=1).c4 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:decimal1, type:decimal(38,18), comment:null), ] -POSTHOOK: Lineage: part_change_numeric_group_string_group_floating_string_group PARTITION(part=1).c5 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:float1, type:float, comment:null), ] -POSTHOOK: Lineage: part_change_numeric_group_string_group_floating_string_group PARTITION(part=1).c6 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:double1, type:double, comment:null), ] -POSTHOOK: Lineage: part_change_numeric_group_string_group_floating_string_group PARTITION(part=1).c7 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:decimal1, type:decimal(38,18), comment:null), ] -POSTHOOK: Lineage: part_change_numeric_group_string_group_floating_string_group PARTITION(part=1).c8 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:float1, type:float, comment:null), ] -POSTHOOK: Lineage: part_change_numeric_group_string_group_floating_string_group PARTITION(part=1).c9 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:double1, type:double, comment:null), ] -POSTHOOK: Lineage: part_change_numeric_group_string_group_floating_string_group PARTITION(part=1).insert_num SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:insert_num, type:int, comment:null), ] -insert_num decimal1 float1 double1 decimal1 float1 double1 decimal1 float1 double1 decimal1 float1 double1 decimal1 float1 double1 _c16 -PREHOOK: query: select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from 
part_change_numeric_group_string_group_floating_string_group -PREHOOK: type: QUERY -PREHOOK: Input: default@part_change_numeric_group_string_group_floating_string_group -PREHOOK: Input: default@part_change_numeric_group_string_group_floating_string_group@part=1 -#### A masked pattern was here #### -POSTHOOK: query: select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from part_change_numeric_group_string_group_floating_string_group -POSTHOOK: type: QUERY -POSTHOOK: Input: default@part_change_numeric_group_string_group_floating_string_group -POSTHOOK: Input: default@part_change_numeric_group_string_group_floating_string_group@part=1 -#### A masked pattern was here #### -insert_num part c1 c2 c3 c4 c5 c6 c7 c8 c9 c10 c11 c12 c13 c14 c15 b -101 1 99999999999999999999.999999999999999999 Infinity 1.7976931348623157E308 99999999999999999999.999999999999999999 Infinity 1.7976931348623157E308 99999999999999999999.999999999999999999 Infinity 1.7976931348623157E308 99999999999999999999.999999999999999999 Infinity 1.7976931348623157E308 99999999999999999999.999999999999999999 Infinity 1.7976931348623157E308 original -102 1 -99999999999999999999.999999999999999999 -Infinity -1.7976931348623157E308 -99999999999999999999.999999999999999999 -Infinity -1.7976931348623157E308 -99999999999999999999.999999999999999999 -Infinity -1.7976931348623157E308 -99999999999999999999.999999999999999999 -Infinity -1.7976931348623157E308 -99999999999999999999.999999999999999999 -Infinity -1.7976931348623157E308 original -103 1 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL original -104 1 66475.561431000000000000 -100.35978 30.774 66475.561431000000000000 -100.35978 30.774 66475.561431000000000000 -100.35978 30.774 66475.561431000000000000 -100.35978 30.774 66475.561431000000000000 -100.35978 30.774 original -105 1 9250340.750000000000000000 NULL 46114.28 9250340.750000000000000000 NULL 46114.28 9250340.750000000000000000 NULL 46114.28 9250340.750000000000000000 NULL 46114.28 9250340.750000000000000000 NULL 46114.28 original -PREHOOK: query: alter table part_change_numeric_group_string_group_floating_string_group replace columns (insert_num int, - c1 STRING, c2 STRING, c3 STRING, - c4 CHAR(50), c5 CHAR(50), c6 CHAR(50), c7 CHAR(7), c8 CHAR(7), c9 CHAR(7), - c10 VARCHAR(50), c11 VARCHAR(50), c12 VARCHAR(50), c13 VARCHAR(7), c14 VARCHAR(7), c15 VARCHAR(7), - b STRING) -PREHOOK: type: ALTERTABLE_REPLACECOLS -PREHOOK: Input: default@part_change_numeric_group_string_group_floating_string_group -PREHOOK: Output: default@part_change_numeric_group_string_group_floating_string_group -POSTHOOK: query: alter table part_change_numeric_group_string_group_floating_string_group replace columns (insert_num int, - c1 STRING, c2 STRING, c3 STRING, - c4 CHAR(50), c5 CHAR(50), c6 CHAR(50), c7 CHAR(7), c8 CHAR(7), c9 CHAR(7), - c10 VARCHAR(50), c11 VARCHAR(50), c12 VARCHAR(50), c13 VARCHAR(7), c14 VARCHAR(7), c15 VARCHAR(7), - b STRING) -POSTHOOK: type: ALTERTABLE_REPLACECOLS -POSTHOOK: Input: default@part_change_numeric_group_string_group_floating_string_group -POSTHOOK: Output: default@part_change_numeric_group_string_group_floating_string_group -PREHOOK: query: insert into table part_change_numeric_group_string_group_floating_string_group partition(part=1) VALUES (111, - 'filler', 'filler', 'filler', - 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', - 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', - 'new') -PREHOOK: type: QUERY -PREHOOK: Input: 
_dummy_database@_dummy_table -PREHOOK: Output: default@part_change_numeric_group_string_group_floating_string_group@part=1 -POSTHOOK: query: insert into table part_change_numeric_group_string_group_floating_string_group partition(part=1) VALUES (111, - 'filler', 'filler', 'filler', - 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', - 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', - 'new') -POSTHOOK: type: QUERY -POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@part_change_numeric_group_string_group_floating_string_group@part=1 -POSTHOOK: Lineage: part_change_numeric_group_string_group_floating_string_group PARTITION(part=1).b SCRIPT [] -POSTHOOK: Lineage: part_change_numeric_group_string_group_floating_string_group PARTITION(part=1).c1 SCRIPT [] -POSTHOOK: Lineage: part_change_numeric_group_string_group_floating_string_group PARTITION(part=1).c10 SCRIPT [] -POSTHOOK: Lineage: part_change_numeric_group_string_group_floating_string_group PARTITION(part=1).c11 SCRIPT [] -POSTHOOK: Lineage: part_change_numeric_group_string_group_floating_string_group PARTITION(part=1).c12 SCRIPT [] -POSTHOOK: Lineage: part_change_numeric_group_string_group_floating_string_group PARTITION(part=1).c13 SCRIPT [] -POSTHOOK: Lineage: part_change_numeric_group_string_group_floating_string_group PARTITION(part=1).c14 SCRIPT [] -POSTHOOK: Lineage: part_change_numeric_group_string_group_floating_string_group PARTITION(part=1).c15 SCRIPT [] -POSTHOOK: Lineage: part_change_numeric_group_string_group_floating_string_group PARTITION(part=1).c2 SCRIPT [] -POSTHOOK: Lineage: part_change_numeric_group_string_group_floating_string_group PARTITION(part=1).c3 SCRIPT [] -POSTHOOK: Lineage: part_change_numeric_group_string_group_floating_string_group PARTITION(part=1).c4 SCRIPT [] -POSTHOOK: Lineage: part_change_numeric_group_string_group_floating_string_group PARTITION(part=1).c5 SCRIPT [] -POSTHOOK: Lineage: part_change_numeric_group_string_group_floating_string_group PARTITION(part=1).c6 SCRIPT [] -POSTHOOK: Lineage: part_change_numeric_group_string_group_floating_string_group PARTITION(part=1).c7 SCRIPT [] -POSTHOOK: Lineage: part_change_numeric_group_string_group_floating_string_group PARTITION(part=1).c8 SCRIPT [] -POSTHOOK: Lineage: part_change_numeric_group_string_group_floating_string_group PARTITION(part=1).c9 SCRIPT [] -POSTHOOK: Lineage: part_change_numeric_group_string_group_floating_string_group PARTITION(part=1).insert_num SCRIPT [] -_col0 _col1 _col2 _col3 _col4 _col5 _col6 _col7 _col8 _col9 _col10 _col11 _col12 _col13 _col14 _col15 _col16 -PREHOOK: query: explain vectorization detail -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from part_change_numeric_group_string_group_floating_string_group -PREHOOK: type: QUERY -POSTHOOK: query: explain vectorization detail -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from part_change_numeric_group_string_group_floating_string_group -POSTHOOK: type: QUERY -Explain -PLAN VECTORIZATION: - enabled: true - enabledConditionsMet: [hive.vectorized.execution.enabled IS true] - -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-1 - Tez -#### A masked pattern was here #### - Vertices: - Map 1 - Map Operator Tree: - TableScan - alias: part_change_numeric_group_string_group_floating_string_group - Statistics: Num rows: 6 Data size: 3300 Basic stats: COMPLETE Column stats: PARTIAL - TableScan Vectorization: - native: 
true - vectorizationSchemaColumns: [0:insert_num:int, 1:c1:string, 2:c2:string, 3:c3:string, 4:c4:char(50), 5:c5:char(50), 6:c6:char(50), 7:c7:char(7), 8:c8:char(7), 9:c9:char(7), 10:c10:varchar(50), 11:c11:varchar(50), 12:c12:varchar(50), 13:c13:varchar(7), 14:c14:varchar(7), 15:c15:varchar(7), 16:b:string, 17:part:int, 18:ROW__ID:struct] - Select Operator - expressions: insert_num (type: int), part (type: int), c1 (type: string), c2 (type: string), c3 (type: string), c4 (type: char(50)), c5 (type: char(50)), c6 (type: char(50)), c7 (type: char(7)), c8 (type: char(7)), c9 (type: char(7)), c10 (type: varchar(50)), c11 (type: varchar(50)), c12 (type: varchar(50)), c13 (type: varchar(7)), c14 (type: varchar(7)), c15 (type: varchar(7)), b (type: string) - outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17 - Select Vectorization: - className: VectorSelectOperator - native: true - projectedOutputColumnNums: [0, 17, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16] - Statistics: Num rows: 6 Data size: 3300 Basic stats: COMPLETE Column stats: PARTIAL - File Output Operator - compressed: false - File Sink Vectorization: - className: VectorFileSinkOperator - native: false - Statistics: Num rows: 6 Data size: 3300 Basic stats: COMPLETE Column stats: PARTIAL - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - Execution mode: vectorized, llap - LLAP IO: all inputs - Map Vectorization: - enabled: true - enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true - inputFormatFeatureSupport: [] - featureSupportInUse: [] - inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat - allNative: false - usesVectorUDFAdaptor: false - vectorized: true - rowBatchContext: - dataColumnCount: 17 - includeColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16] - dataColumns: insert_num:int, c1:string, c2:string, c3:string, c4:char(50), c5:char(50), c6:char(50), c7:char(7), c8:char(7), c9:char(7), c10:varchar(50), c11:varchar(50), c12:varchar(50), c13:varchar(7), c14:varchar(7), c15:varchar(7), b:string - partitionColumnCount: 1 - partitionColumns: part:int - scratchColumnTypeNames: [] - - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - ListSink - -PREHOOK: query: select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from part_change_numeric_group_string_group_floating_string_group -PREHOOK: type: QUERY -PREHOOK: Input: default@part_change_numeric_group_string_group_floating_string_group -PREHOOK: Input: default@part_change_numeric_group_string_group_floating_string_group@part=1 -#### A masked pattern was here #### -POSTHOOK: query: select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from part_change_numeric_group_string_group_floating_string_group -POSTHOOK: type: QUERY -POSTHOOK: Input: default@part_change_numeric_group_string_group_floating_string_group -POSTHOOK: Input: default@part_change_numeric_group_string_group_floating_string_group@part=1 -#### A masked pattern was here #### -insert_num part c1 c2 c3 c4 c5 c6 c7 c8 c9 c10 c11 c12 c13 c14 c15 b -101 1 99999999999999999999.999999999999999999 Infinity 1.7976931348623157E308 99999999999999999999.999999999999999999 Infinity 1.7976931348623157E308 9999999 Infinit 1.79769 
99999999999999999999.999999999999999999 Infinity 1.7976931348623157E308 9999999 Infinit 1.79769 original -102 1 -99999999999999999999.999999999999999999 -Infinity -1.7976931348623157E308 -99999999999999999999.999999999999999999 -Infinity -1.7976931348623157E308 -999999 -Infini -1.7976 -99999999999999999999.999999999999999999 -Infinity -1.7976931348623157E308 -999999 -Infini -1.7976 original -103 1 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL original -104 1 66475.561431 -100.35978 30.774 66475.561431 -100.35978 30.774 66475.5 -100.35 30.774 66475.561431 -100.35978 30.774 66475.5 -100.35 30.774 original -105 1 9250340.75 NULL 46114.28 9250340.75 NULL 46114.28 9250340 NULL 46114.2 9250340.75 NULL 46114.28 9250340 NULL 46114.2 original -111 1 filler filler filler filler filler filler filler filler filler filler filler filler filler filler filler new -PREHOOK: query: drop table part_change_numeric_group_string_group_floating_string_group -PREHOOK: type: DROPTABLE -PREHOOK: Input: default@part_change_numeric_group_string_group_floating_string_group -PREHOOK: Output: default@part_change_numeric_group_string_group_floating_string_group -POSTHOOK: query: drop table part_change_numeric_group_string_group_floating_string_group -POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@part_change_numeric_group_string_group_floating_string_group -POSTHOOK: Output: default@part_change_numeric_group_string_group_floating_string_group -PREHOOK: query: CREATE TABLE part_change_string_group_string_group_string(insert_num int, - c1 string, c2 string, c3 string, c4 string, - c5 CHAR(50), c6 CHAR(50), c7 CHAR(50), - c8 VARCHAR(50), c9 VARCHAR(50), c10 VARCHAR(50), b STRING) PARTITIONED BY(part INT) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@part_change_string_group_string_group_string -POSTHOOK: query: CREATE TABLE part_change_string_group_string_group_string(insert_num int, - c1 string, c2 string, c3 string, c4 string, - c5 CHAR(50), c6 CHAR(50), c7 CHAR(50), - c8 VARCHAR(50), c9 VARCHAR(50), c10 VARCHAR(50), b STRING) PARTITIONED BY(part INT) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@part_change_string_group_string_group_string -PREHOOK: query: insert into table part_change_string_group_string_group_string partition(part=1) SELECT insert_num, - string2, string2, string2, string2, - string2, string2, string2, - string2, string2, string2, - 'original' FROM schema_evolution_data -PREHOOK: type: QUERY -PREHOOK: Input: default@schema_evolution_data -PREHOOK: Output: default@part_change_string_group_string_group_string@part=1 -POSTHOOK: query: insert into table part_change_string_group_string_group_string partition(part=1) SELECT insert_num, - string2, string2, string2, string2, - string2, string2, string2, - string2, string2, string2, - 'original' FROM schema_evolution_data -POSTHOOK: type: QUERY -POSTHOOK: Input: default@schema_evolution_data -POSTHOOK: Output: default@part_change_string_group_string_group_string@part=1 -POSTHOOK: Lineage: part_change_string_group_string_group_string PARTITION(part=1).b SIMPLE [] -POSTHOOK: Lineage: part_change_string_group_string_group_string PARTITION(part=1).c1 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:string2, type:string, comment:null), ] -POSTHOOK: Lineage: part_change_string_group_string_group_string PARTITION(part=1).c10 EXPRESSION [(schema_evolution_data)schema_evolution_data.FieldSchema(name:string2, type:string, 
comment:null), ] -POSTHOOK: Lineage: part_change_string_group_string_group_string PARTITION(part=1).c2 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:string2, type:string, comment:null), ] -POSTHOOK: Lineage: part_change_string_group_string_group_string PARTITION(part=1).c3 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:string2, type:string, comment:null), ] -POSTHOOK: Lineage: part_change_string_group_string_group_string PARTITION(part=1).c4 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:string2, type:string, comment:null), ] -POSTHOOK: Lineage: part_change_string_group_string_group_string PARTITION(part=1).c5 EXPRESSION [(schema_evolution_data)schema_evolution_data.FieldSchema(name:string2, type:string, comment:null), ] -POSTHOOK: Lineage: part_change_string_group_string_group_string PARTITION(part=1).c6 EXPRESSION [(schema_evolution_data)schema_evolution_data.FieldSchema(name:string2, type:string, comment:null), ] -POSTHOOK: Lineage: part_change_string_group_string_group_string PARTITION(part=1).c7 EXPRESSION [(schema_evolution_data)schema_evolution_data.FieldSchema(name:string2, type:string, comment:null), ] -POSTHOOK: Lineage: part_change_string_group_string_group_string PARTITION(part=1).c8 EXPRESSION [(schema_evolution_data)schema_evolution_data.FieldSchema(name:string2, type:string, comment:null), ] -POSTHOOK: Lineage: part_change_string_group_string_group_string PARTITION(part=1).c9 EXPRESSION [(schema_evolution_data)schema_evolution_data.FieldSchema(name:string2, type:string, comment:null), ] -POSTHOOK: Lineage: part_change_string_group_string_group_string PARTITION(part=1).insert_num SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:insert_num, type:int, comment:null), ] -_col0 _col1 _col2 _col3 _col4 _col5 _col6 _col7 _col8 _col9 _col10 _col11 -PREHOOK: query: select insert_num,part,c1,c2,c3,c4,b from part_change_string_group_string_group_string -PREHOOK: type: QUERY -PREHOOK: Input: default@part_change_string_group_string_group_string -PREHOOK: Input: default@part_change_string_group_string_group_string@part=1 -#### A masked pattern was here #### -POSTHOOK: query: select insert_num,part,c1,c2,c3,c4,b from part_change_string_group_string_group_string -POSTHOOK: type: QUERY -POSTHOOK: Input: default@part_change_string_group_string_group_string -POSTHOOK: Input: default@part_change_string_group_string_group_string@part=1 -#### A masked pattern was here #### -insert_num part c1 c2 c3 c4 b -101 1 freckled freckled freckled freckled original -102 1 ox ox ox ox original -103 1 original -104 1 I cooked I cooked I cooked I cooked original -105 1 200 200 200 200 original -PREHOOK: query: alter table part_change_string_group_string_group_string replace columns (insert_num int, - c1 CHAR(50), c2 CHAR(9), c3 VARCHAR(50), c4 CHAR(9), - c5 VARCHAR(50), c6 VARCHAR(9), c7 STRING, - c8 CHAR(50), c9 CHAR(9), c10 STRING, b STRING) -PREHOOK: type: ALTERTABLE_REPLACECOLS -PREHOOK: Input: default@part_change_string_group_string_group_string -PREHOOK: Output: default@part_change_string_group_string_group_string -POSTHOOK: query: alter table part_change_string_group_string_group_string replace columns (insert_num int, - c1 CHAR(50), c2 CHAR(9), c3 VARCHAR(50), c4 CHAR(9), - c5 VARCHAR(50), c6 VARCHAR(9), c7 STRING, - c8 CHAR(50), c9 CHAR(9), c10 STRING, b STRING) -POSTHOOK: type: ALTERTABLE_REPLACECOLS -POSTHOOK: Input: default@part_change_string_group_string_group_string -POSTHOOK: Output: 
default@part_change_string_group_string_group_string -PREHOOK: query: insert into table part_change_string_group_string_group_string partition(part=1) VALUES (111, - 'filler', 'filler', 'filler', 'filler', - 'filler', 'filler', 'filler', - 'filler', 'filler', 'filler', - 'new') -PREHOOK: type: QUERY -PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@part_change_string_group_string_group_string@part=1 -POSTHOOK: query: insert into table part_change_string_group_string_group_string partition(part=1) VALUES (111, - 'filler', 'filler', 'filler', 'filler', - 'filler', 'filler', 'filler', - 'filler', 'filler', 'filler', - 'new') -POSTHOOK: type: QUERY -POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@part_change_string_group_string_group_string@part=1 -POSTHOOK: Lineage: part_change_string_group_string_group_string PARTITION(part=1).b SCRIPT [] -POSTHOOK: Lineage: part_change_string_group_string_group_string PARTITION(part=1).c1 SCRIPT [] -POSTHOOK: Lineage: part_change_string_group_string_group_string PARTITION(part=1).c10 SCRIPT [] -POSTHOOK: Lineage: part_change_string_group_string_group_string PARTITION(part=1).c2 SCRIPT [] -POSTHOOK: Lineage: part_change_string_group_string_group_string PARTITION(part=1).c3 SCRIPT [] -POSTHOOK: Lineage: part_change_string_group_string_group_string PARTITION(part=1).c4 SCRIPT [] -POSTHOOK: Lineage: part_change_string_group_string_group_string PARTITION(part=1).c5 SCRIPT [] -POSTHOOK: Lineage: part_change_string_group_string_group_string PARTITION(part=1).c6 SCRIPT [] -POSTHOOK: Lineage: part_change_string_group_string_group_string PARTITION(part=1).c7 SCRIPT [] -POSTHOOK: Lineage: part_change_string_group_string_group_string PARTITION(part=1).c8 SCRIPT [] -POSTHOOK: Lineage: part_change_string_group_string_group_string PARTITION(part=1).c9 SCRIPT [] -POSTHOOK: Lineage: part_change_string_group_string_group_string PARTITION(part=1).insert_num SCRIPT [] -_col0 _col1 _col2 _col3 _col4 _col5 _col6 _col7 _col8 _col9 _col10 _col11 -PREHOOK: query: explain vectorization detail -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_string_group_string_group_string -PREHOOK: type: QUERY -POSTHOOK: query: explain vectorization detail -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_string_group_string_group_string -POSTHOOK: type: QUERY -Explain -PLAN VECTORIZATION: - enabled: true - enabledConditionsMet: [hive.vectorized.execution.enabled IS true] - -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-1 - Tez -#### A masked pattern was here #### - Vertices: - Map 1 - Map Operator Tree: - TableScan - alias: part_change_string_group_string_group_string - Statistics: Num rows: 6 Data size: 2712 Basic stats: COMPLETE Column stats: PARTIAL - TableScan Vectorization: - native: true - vectorizationSchemaColumns: [0:insert_num:int, 1:c1:char(50), 2:c2:char(9), 3:c3:varchar(50), 4:c4:char(9), 5:c5:varchar(50), 6:c6:varchar(9), 7:c7:string, 8:c8:char(50), 9:c9:char(9), 10:c10:string, 11:b:string, 12:part:int, 13:ROW__ID:struct] - Select Operator - expressions: insert_num (type: int), part (type: int), c1 (type: char(50)), c2 (type: char(9)), c3 (type: varchar(50)), c4 (type: char(9)), c5 (type: varchar(50)), c6 (type: varchar(9)), c7 (type: string), c8 (type: char(50)), c9 (type: char(9)), c10 (type: string), b (type: string) - outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, 
_col11, _col12 - Select Vectorization: - className: VectorSelectOperator - native: true - projectedOutputColumnNums: [0, 12, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11] - Statistics: Num rows: 6 Data size: 2712 Basic stats: COMPLETE Column stats: PARTIAL - File Output Operator - compressed: false - File Sink Vectorization: - className: VectorFileSinkOperator - native: false - Statistics: Num rows: 6 Data size: 2712 Basic stats: COMPLETE Column stats: PARTIAL - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - Execution mode: vectorized, llap - LLAP IO: all inputs - Map Vectorization: - enabled: true - enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true - inputFormatFeatureSupport: [] - featureSupportInUse: [] - inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat - allNative: false - usesVectorUDFAdaptor: false - vectorized: true - rowBatchContext: - dataColumnCount: 12 - includeColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11] - dataColumns: insert_num:int, c1:char(50), c2:char(9), c3:varchar(50), c4:char(9), c5:varchar(50), c6:varchar(9), c7:string, c8:char(50), c9:char(9), c10:string, b:string - partitionColumnCount: 1 - partitionColumns: part:int - scratchColumnTypeNames: [] - - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - ListSink - -PREHOOK: query: select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_string_group_string_group_string -PREHOOK: type: QUERY -PREHOOK: Input: default@part_change_string_group_string_group_string -PREHOOK: Input: default@part_change_string_group_string_group_string@part=1 -#### A masked pattern was here #### -POSTHOOK: query: select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_string_group_string_group_string -POSTHOOK: type: QUERY -POSTHOOK: Input: default@part_change_string_group_string_group_string -POSTHOOK: Input: default@part_change_string_group_string_group_string@part=1 -#### A masked pattern was here #### -insert_num part c1 c2 c3 c4 c5 c6 c7 c8 c9 c10 b -101 1 freckled freckled freckled freckled freckled freckled freckled freckled freckled freckled original -102 1 ox ox ox ox ox ox ox ox ox ox original -103 1 original -104 1 I cooked I cooked I cooked I cooked I cooked I cooked I cooked I cooked I cooked I cooked original -105 1 200 200 200 200 200 200 200 200 200 200 original -111 1 filler filler filler filler filler filler filler filler filler filler new -PREHOOK: query: drop table part_change_string_group_string_group_string -PREHOOK: type: DROPTABLE -PREHOOK: Input: default@part_change_string_group_string_group_string -PREHOOK: Output: default@part_change_string_group_string_group_string -POSTHOOK: query: drop table part_change_string_group_string_group_string -POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@part_change_string_group_string_group_string -POSTHOOK: Output: default@part_change_string_group_string_group_string -PREHOOK: query: CREATE TABLE part_change_lower_to_higher_numeric_group_tinyint_to_bigint(insert_num int, - c1 tinyint, c2 tinyint, c3 tinyint, c4 tinyint, c5 tinyint, c6 tinyint, - c7 smallint, c8 smallint, c9 smallint, c10 smallint, c11 smallint, - c12 int, c13 int, c14 int, c15 int, - c16 bigint, c17 bigint, c18 bigint, - b STRING) PARTITIONED BY(part INT) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: 
default@part_change_lower_to_higher_numeric_group_tinyint_to_bigint -POSTHOOK: query: CREATE TABLE part_change_lower_to_higher_numeric_group_tinyint_to_bigint(insert_num int, - c1 tinyint, c2 tinyint, c3 tinyint, c4 tinyint, c5 tinyint, c6 tinyint, - c7 smallint, c8 smallint, c9 smallint, c10 smallint, c11 smallint, - c12 int, c13 int, c14 int, c15 int, - c16 bigint, c17 bigint, c18 bigint, - b STRING) PARTITIONED BY(part INT) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@part_change_lower_to_higher_numeric_group_tinyint_to_bigint -PREHOOK: query: insert into table part_change_lower_to_higher_numeric_group_tinyint_to_bigint partition(part=1) SELECT insert_num, - tinyint1, tinyint1, tinyint1, tinyint1, tinyint1, tinyint1, - smallint1, smallint1, smallint1, smallint1, smallint1, - int1, int1, int1, int1, - bigint1, bigint1, bigint1, - 'original' FROM schema_evolution_data -PREHOOK: type: QUERY -PREHOOK: Input: default@schema_evolution_data -PREHOOK: Output: default@part_change_lower_to_higher_numeric_group_tinyint_to_bigint@part=1 -POSTHOOK: query: insert into table part_change_lower_to_higher_numeric_group_tinyint_to_bigint partition(part=1) SELECT insert_num, - tinyint1, tinyint1, tinyint1, tinyint1, tinyint1, tinyint1, - smallint1, smallint1, smallint1, smallint1, smallint1, - int1, int1, int1, int1, - bigint1, bigint1, bigint1, - 'original' FROM schema_evolution_data -POSTHOOK: type: QUERY -POSTHOOK: Input: default@schema_evolution_data -POSTHOOK: Output: default@part_change_lower_to_higher_numeric_group_tinyint_to_bigint@part=1 -POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_tinyint_to_bigint PARTITION(part=1).b SIMPLE [] -POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_tinyint_to_bigint PARTITION(part=1).c1 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:tinyint1, type:tinyint, comment:null), ] -POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_tinyint_to_bigint PARTITION(part=1).c10 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:smallint1, type:smallint, comment:null), ] -POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_tinyint_to_bigint PARTITION(part=1).c11 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:smallint1, type:smallint, comment:null), ] -POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_tinyint_to_bigint PARTITION(part=1).c12 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:int1, type:int, comment:null), ] -POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_tinyint_to_bigint PARTITION(part=1).c13 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:int1, type:int, comment:null), ] -POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_tinyint_to_bigint PARTITION(part=1).c14 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:int1, type:int, comment:null), ] -POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_tinyint_to_bigint PARTITION(part=1).c15 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:int1, type:int, comment:null), ] -POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_tinyint_to_bigint PARTITION(part=1).c16 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:bigint1, type:bigint, comment:null), ] -POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_tinyint_to_bigint PARTITION(part=1).c17 SIMPLE 
[(schema_evolution_data)schema_evolution_data.FieldSchema(name:bigint1, type:bigint, comment:null), ] -POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_tinyint_to_bigint PARTITION(part=1).c18 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:bigint1, type:bigint, comment:null), ] -POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_tinyint_to_bigint PARTITION(part=1).c2 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:tinyint1, type:tinyint, comment:null), ] -POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_tinyint_to_bigint PARTITION(part=1).c3 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:tinyint1, type:tinyint, comment:null), ] -POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_tinyint_to_bigint PARTITION(part=1).c4 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:tinyint1, type:tinyint, comment:null), ] -POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_tinyint_to_bigint PARTITION(part=1).c5 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:tinyint1, type:tinyint, comment:null), ] -POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_tinyint_to_bigint PARTITION(part=1).c6 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:tinyint1, type:tinyint, comment:null), ] -POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_tinyint_to_bigint PARTITION(part=1).c7 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:smallint1, type:smallint, comment:null), ] -POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_tinyint_to_bigint PARTITION(part=1).c8 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:smallint1, type:smallint, comment:null), ] -POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_tinyint_to_bigint PARTITION(part=1).c9 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:smallint1, type:smallint, comment:null), ] -POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_tinyint_to_bigint PARTITION(part=1).insert_num SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:insert_num, type:int, comment:null), ] -insert_num tinyint1 tinyint1 tinyint1 tinyint1 tinyint1 tinyint1 smallint1 smallint1 smallint1 smallint1 smallint1 int1 int1 int1 int1 bigint1 bigint1 bigint1 _c19 -PREHOOK: query: select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from part_change_lower_to_higher_numeric_group_tinyint_to_bigint -PREHOOK: type: QUERY -PREHOOK: Input: default@part_change_lower_to_higher_numeric_group_tinyint_to_bigint -PREHOOK: Input: default@part_change_lower_to_higher_numeric_group_tinyint_to_bigint@part=1 -#### A masked pattern was here #### -POSTHOOK: query: select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from part_change_lower_to_higher_numeric_group_tinyint_to_bigint -POSTHOOK: type: QUERY -POSTHOOK: Input: default@part_change_lower_to_higher_numeric_group_tinyint_to_bigint -POSTHOOK: Input: default@part_change_lower_to_higher_numeric_group_tinyint_to_bigint@part=1 -#### A masked pattern was here #### -insert_num part c1 c2 c3 c4 c5 c6 c7 c8 c9 c10 c11 c12 c13 c14 c15 c16 c17 c18 b -101 1 -128 -128 -128 -128 -128 -128 NULL NULL NULL NULL NULL -2147483648 -2147483648 -2147483648 -2147483648 NULL NULL NULL original -102 1 127 127 127 127 127 127 32767 32767 32767 32767 32767 2147483647 2147483647 2147483647 2147483647 9223372036854775807 
9223372036854775807 9223372036854775807 original -103 1 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL original -104 1 23 23 23 23 23 23 834 834 834 834 834 203332 203332 203332 203332 888888857923222 888888857923222 888888857923222 original -105 1 -99 -99 -99 -99 -99 -99 -28300 -28300 -28300 -28300 -28300 -999992 -999992 -999992 -999992 -222282153733 -222282153733 -222282153733 original -PREHOOK: query: alter table part_change_lower_to_higher_numeric_group_tinyint_to_bigint replace columns (insert_num int, - c1 SMALLINT, c2 INT, c3 BIGINT, c4 decimal(38,18), c5 FLOAT, c6 DOUBLE, - c7 INT, c8 BIGINT, c9 decimal(38,18), c10 FLOAT, c11 DOUBLE, - c12 BIGINT, c13 decimal(38,18), c14 FLOAT, c15 DOUBLE, - c16 decimal(38,18), c17 FLOAT, c18 DOUBLE, - b STRING) -PREHOOK: type: ALTERTABLE_REPLACECOLS -PREHOOK: Input: default@part_change_lower_to_higher_numeric_group_tinyint_to_bigint -PREHOOK: Output: default@part_change_lower_to_higher_numeric_group_tinyint_to_bigint -POSTHOOK: query: alter table part_change_lower_to_higher_numeric_group_tinyint_to_bigint replace columns (insert_num int, - c1 SMALLINT, c2 INT, c3 BIGINT, c4 decimal(38,18), c5 FLOAT, c6 DOUBLE, - c7 INT, c8 BIGINT, c9 decimal(38,18), c10 FLOAT, c11 DOUBLE, - c12 BIGINT, c13 decimal(38,18), c14 FLOAT, c15 DOUBLE, - c16 decimal(38,18), c17 FLOAT, c18 DOUBLE, - b STRING) -POSTHOOK: type: ALTERTABLE_REPLACECOLS -POSTHOOK: Input: default@part_change_lower_to_higher_numeric_group_tinyint_to_bigint -POSTHOOK: Output: default@part_change_lower_to_higher_numeric_group_tinyint_to_bigint -PREHOOK: query: insert into table part_change_lower_to_higher_numeric_group_tinyint_to_bigint partition(part=1) VALUES (111, - 7000, 80000, 90000000, 1234.5678, 9876.543, 789.321, - 80000, 90000000, 1234.5678, 9876.543, 789.321, - 90000000, 1234.5678, 9876.543, 789.321, - 1234.5678, 9876.543, 789.321, - 'new') -PREHOOK: type: QUERY -PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@part_change_lower_to_higher_numeric_group_tinyint_to_bigint@part=1 -POSTHOOK: query: insert into table part_change_lower_to_higher_numeric_group_tinyint_to_bigint partition(part=1) VALUES (111, - 7000, 80000, 90000000, 1234.5678, 9876.543, 789.321, - 80000, 90000000, 1234.5678, 9876.543, 789.321, - 90000000, 1234.5678, 9876.543, 789.321, - 1234.5678, 9876.543, 789.321, - 'new') -POSTHOOK: type: QUERY -POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@part_change_lower_to_higher_numeric_group_tinyint_to_bigint@part=1 -POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_tinyint_to_bigint PARTITION(part=1).b SCRIPT [] -POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_tinyint_to_bigint PARTITION(part=1).c1 SCRIPT [] -POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_tinyint_to_bigint PARTITION(part=1).c10 SCRIPT [] -POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_tinyint_to_bigint PARTITION(part=1).c11 SCRIPT [] -POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_tinyint_to_bigint PARTITION(part=1).c12 SCRIPT [] -POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_tinyint_to_bigint PARTITION(part=1).c13 SCRIPT [] -POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_tinyint_to_bigint PARTITION(part=1).c14 SCRIPT [] -POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_tinyint_to_bigint PARTITION(part=1).c15 SCRIPT [] -POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_tinyint_to_bigint PARTITION(part=1).c16 
SCRIPT [] -POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_tinyint_to_bigint PARTITION(part=1).c17 SCRIPT [] -POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_tinyint_to_bigint PARTITION(part=1).c18 SCRIPT [] -POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_tinyint_to_bigint PARTITION(part=1).c2 SCRIPT [] -POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_tinyint_to_bigint PARTITION(part=1).c3 SCRIPT [] -POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_tinyint_to_bigint PARTITION(part=1).c4 SCRIPT [] -POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_tinyint_to_bigint PARTITION(part=1).c5 SCRIPT [] -POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_tinyint_to_bigint PARTITION(part=1).c6 SCRIPT [] -POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_tinyint_to_bigint PARTITION(part=1).c7 SCRIPT [] -POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_tinyint_to_bigint PARTITION(part=1).c8 SCRIPT [] -POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_tinyint_to_bigint PARTITION(part=1).c9 SCRIPT [] -POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_tinyint_to_bigint PARTITION(part=1).insert_num SCRIPT [] -_col0 _col1 _col2 _col3 _col4 _col5 _col6 _col7 _col8 _col9 _col10 _col11 _col12 _col13 _col14 _col15 _col16 _col17 _col18 _col19 -PREHOOK: query: explain vectorization detail -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from part_change_lower_to_higher_numeric_group_tinyint_to_bigint -PREHOOK: type: QUERY -POSTHOOK: query: explain vectorization detail -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from part_change_lower_to_higher_numeric_group_tinyint_to_bigint -POSTHOOK: type: QUERY -Explain -PLAN VECTORIZATION: - enabled: true - enabledConditionsMet: [hive.vectorized.execution.enabled IS true] - -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-1 - Tez -#### A masked pattern was here #### - Vertices: - Map 1 - Map Operator Tree: - TableScan - alias: part_change_lower_to_higher_numeric_group_tinyint_to_bigint - Statistics: Num rows: 6 Data size: 1060 Basic stats: COMPLETE Column stats: COMPLETE - TableScan Vectorization: - native: true - vectorizationSchemaColumns: [0:insert_num:int, 1:c1:smallint, 2:c2:int, 3:c3:bigint, 4:c4:decimal(38,18), 5:c5:float, 6:c6:double, 7:c7:int, 8:c8:bigint, 9:c9:decimal(38,18), 10:c10:float, 11:c11:double, 12:c12:bigint, 13:c13:decimal(38,18), 14:c14:float, 15:c15:double, 16:c16:decimal(38,18), 17:c17:float, 18:c18:double, 19:b:string, 20:part:int, 21:ROW__ID:struct] - Select Operator - expressions: insert_num (type: int), part (type: int), c1 (type: smallint), c2 (type: int), c3 (type: bigint), c4 (type: decimal(38,18)), c5 (type: float), c6 (type: double), c7 (type: int), c8 (type: bigint), c9 (type: decimal(38,18)), c10 (type: float), c11 (type: double), c12 (type: bigint), c13 (type: decimal(38,18)), c14 (type: float), c15 (type: double), c16 (type: decimal(38,18)), c17 (type: float), c18 (type: double), b (type: string) - outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20 - Select Vectorization: - className: VectorSelectOperator - native: true - projectedOutputColumnNums: [0, 20, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] - Statistics: Num 
rows: 6 Data size: 1060 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - File Sink Vectorization: - className: VectorFileSinkOperator - native: false - Statistics: Num rows: 6 Data size: 1060 Basic stats: COMPLETE Column stats: COMPLETE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - Execution mode: vectorized, llap - LLAP IO: all inputs - Map Vectorization: - enabled: true - enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true - inputFormatFeatureSupport: [] - featureSupportInUse: [] - inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat - allNative: false - usesVectorUDFAdaptor: false - vectorized: true - rowBatchContext: - dataColumnCount: 20 - includeColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] - dataColumns: insert_num:int, c1:smallint, c2:int, c3:bigint, c4:decimal(38,18), c5:float, c6:double, c7:int, c8:bigint, c9:decimal(38,18), c10:float, c11:double, c12:bigint, c13:decimal(38,18), c14:float, c15:double, c16:decimal(38,18), c17:float, c18:double, b:string - partitionColumnCount: 1 - partitionColumns: part:int - scratchColumnTypeNames: [] - - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - ListSink - -PREHOOK: query: select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from part_change_lower_to_higher_numeric_group_tinyint_to_bigint -PREHOOK: type: QUERY -PREHOOK: Input: default@part_change_lower_to_higher_numeric_group_tinyint_to_bigint -PREHOOK: Input: default@part_change_lower_to_higher_numeric_group_tinyint_to_bigint@part=1 -#### A masked pattern was here #### -POSTHOOK: query: select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from part_change_lower_to_higher_numeric_group_tinyint_to_bigint -POSTHOOK: type: QUERY -POSTHOOK: Input: default@part_change_lower_to_higher_numeric_group_tinyint_to_bigint -POSTHOOK: Input: default@part_change_lower_to_higher_numeric_group_tinyint_to_bigint@part=1 -#### A masked pattern was here #### -insert_num part c1 c2 c3 c4 c5 c6 c7 c8 c9 c10 c11 c12 c13 c14 c15 c16 c17 c18 b -101 1 -128 -128 -128 -128.000000000000000000 -128.0 -128.0 NULL NULL NULL NULL NULL -2147483648 -2147483648.000000000000000000 -2.14748365E9 -2.147483648E9 NULL NULL NULL original -102 1 127 127 127 127.000000000000000000 127.0 127.0 32767 32767 32767.000000000000000000 32767.0 32767.0 2147483647 2147483647.000000000000000000 2.14748365E9 2.147483647E9 9223372036854775807.000000000000000000 9.223372E18 9.223372036854776E18 original -103 1 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL original -104 1 23 23 23 23.000000000000000000 23.0 23.0 834 834 834.000000000000000000 834.0 834.0 203332 203332.000000000000000000 203332.0 203332.0 888888857923222.000000000000000000 8.8888885E14 8.88888857923222E14 original -105 1 -99 -99 -99 -99.000000000000000000 -99.0 -99.0 -28300 -28300 -28300.000000000000000000 -28300.0 -28300.0 -999992 -999992.000000000000000000 -999992.0 -999992.0 -222282153733.000000000000000000 -2.22282154E11 -2.22282153733E11 original -111 1 7000 80000 90000000 1234.567800000000000000 9876.543 789.321 80000 90000000 1234.567800000000000000 9876.543 789.321 90000000 1234.567800000000000000 9876.543 789.321 1234.567800000000000000 9876.543 789.321 new -PREHOOK: query: 
drop table part_change_lower_to_higher_numeric_group_tinyint_to_bigint -PREHOOK: type: DROPTABLE -PREHOOK: Input: default@part_change_lower_to_higher_numeric_group_tinyint_to_bigint -PREHOOK: Output: default@part_change_lower_to_higher_numeric_group_tinyint_to_bigint -POSTHOOK: query: drop table part_change_lower_to_higher_numeric_group_tinyint_to_bigint -POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@part_change_lower_to_higher_numeric_group_tinyint_to_bigint -POSTHOOK: Output: default@part_change_lower_to_higher_numeric_group_tinyint_to_bigint -PREHOOK: query: CREATE TABLE part_change_lower_to_higher_numeric_group_decimal_to_float(insert_num int, - c1 decimal(38,18), c2 decimal(38,18), - c3 float, - b STRING) PARTITIONED BY(part INT) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@part_change_lower_to_higher_numeric_group_decimal_to_float -POSTHOOK: query: CREATE TABLE part_change_lower_to_higher_numeric_group_decimal_to_float(insert_num int, - c1 decimal(38,18), c2 decimal(38,18), - c3 float, - b STRING) PARTITIONED BY(part INT) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@part_change_lower_to_higher_numeric_group_decimal_to_float -PREHOOK: query: insert into table part_change_lower_to_higher_numeric_group_decimal_to_float partition(part=1) SELECT insert_num, - decimal1, decimal1, - float1, - 'original' FROM schema_evolution_data -PREHOOK: type: QUERY -PREHOOK: Input: default@schema_evolution_data -PREHOOK: Output: default@part_change_lower_to_higher_numeric_group_decimal_to_float@part=1 -POSTHOOK: query: insert into table part_change_lower_to_higher_numeric_group_decimal_to_float partition(part=1) SELECT insert_num, - decimal1, decimal1, - float1, - 'original' FROM schema_evolution_data -POSTHOOK: type: QUERY -POSTHOOK: Input: default@schema_evolution_data -POSTHOOK: Output: default@part_change_lower_to_higher_numeric_group_decimal_to_float@part=1 -POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_decimal_to_float PARTITION(part=1).b SIMPLE [] -POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_decimal_to_float PARTITION(part=1).c1 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:decimal1, type:decimal(38,18), comment:null), ] -POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_decimal_to_float PARTITION(part=1).c2 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:decimal1, type:decimal(38,18), comment:null), ] -POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_decimal_to_float PARTITION(part=1).c3 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:float1, type:float, comment:null), ] -POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_decimal_to_float PARTITION(part=1).insert_num SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:insert_num, type:int, comment:null), ] -insert_num decimal1 decimal1 float1 _c4 -PREHOOK: query: select insert_num,part,c1,c2,c3,b from part_change_lower_to_higher_numeric_group_decimal_to_float -PREHOOK: type: QUERY -PREHOOK: Input: default@part_change_lower_to_higher_numeric_group_decimal_to_float -PREHOOK: Input: default@part_change_lower_to_higher_numeric_group_decimal_to_float@part=1 -#### A masked pattern was here #### -POSTHOOK: query: select insert_num,part,c1,c2,c3,b from part_change_lower_to_higher_numeric_group_decimal_to_float -POSTHOOK: type: QUERY -POSTHOOK: Input: 
default@part_change_lower_to_higher_numeric_group_decimal_to_float -POSTHOOK: Input: default@part_change_lower_to_higher_numeric_group_decimal_to_float@part=1 -#### A masked pattern was here #### -insert_num part c1 c2 c3 b -101 1 99999999999999999999.999999999999999999 99999999999999999999.999999999999999999 Infinity original -102 1 -99999999999999999999.999999999999999999 -99999999999999999999.999999999999999999 -Infinity original -103 1 NULL NULL NULL original -104 1 66475.561431000000000000 66475.561431000000000000 -100.35978 original -105 1 9250340.750000000000000000 9250340.750000000000000000 NULL original -PREHOOK: query: alter table part_change_lower_to_higher_numeric_group_decimal_to_float replace columns (insert_num int, c1 float, c2 double, c3 DOUBLE, b STRING) -PREHOOK: type: ALTERTABLE_REPLACECOLS -PREHOOK: Input: default@part_change_lower_to_higher_numeric_group_decimal_to_float -PREHOOK: Output: default@part_change_lower_to_higher_numeric_group_decimal_to_float -POSTHOOK: query: alter table part_change_lower_to_higher_numeric_group_decimal_to_float replace columns (insert_num int, c1 float, c2 double, c3 DOUBLE, b STRING) -POSTHOOK: type: ALTERTABLE_REPLACECOLS -POSTHOOK: Input: default@part_change_lower_to_higher_numeric_group_decimal_to_float -POSTHOOK: Output: default@part_change_lower_to_higher_numeric_group_decimal_to_float -PREHOOK: query: insert into table part_change_lower_to_higher_numeric_group_decimal_to_float partition(part=1) VALUES (111, 1234.5678, 9876.543, 1234.5678, 'new') -PREHOOK: type: QUERY -PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@part_change_lower_to_higher_numeric_group_decimal_to_float@part=1 -POSTHOOK: query: insert into table part_change_lower_to_higher_numeric_group_decimal_to_float partition(part=1) VALUES (111, 1234.5678, 9876.543, 1234.5678, 'new') -POSTHOOK: type: QUERY -POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@part_change_lower_to_higher_numeric_group_decimal_to_float@part=1 -POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_decimal_to_float PARTITION(part=1).b SCRIPT [] -POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_decimal_to_float PARTITION(part=1).c1 SCRIPT [] -POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_decimal_to_float PARTITION(part=1).c2 SCRIPT [] -POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_decimal_to_float PARTITION(part=1).c3 SCRIPT [] -POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_decimal_to_float PARTITION(part=1).insert_num SCRIPT [] -_col0 _col1 _col2 _col3 _col4 -PREHOOK: query: explain vectorization detail -select insert_num,part,c1,c2,c3,b from part_change_lower_to_higher_numeric_group_decimal_to_float -PREHOOK: type: QUERY -POSTHOOK: query: explain vectorization detail -select insert_num,part,c1,c2,c3,b from part_change_lower_to_higher_numeric_group_decimal_to_float -POSTHOOK: type: QUERY -Explain -PLAN VECTORIZATION: - enabled: true - enabledConditionsMet: [hive.vectorized.execution.enabled IS true] - -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-1 - Tez -#### A masked pattern was here #### - Vertices: - Map 1 - Map Operator Tree: - TableScan - alias: part_change_lower_to_higher_numeric_group_decimal_to_float - Statistics: Num rows: 6 Data size: 648 Basic stats: COMPLETE Column stats: PARTIAL - TableScan Vectorization: - native: true - vectorizationSchemaColumns: [0:insert_num:int, 1:c1:float, 2:c2:double, 3:c3:double, 
4:b:string, 5:part:int, 6:ROW__ID:struct] - Select Operator - expressions: insert_num (type: int), part (type: int), c1 (type: float), c2 (type: double), c3 (type: double), b (type: string) - outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 - Select Vectorization: - className: VectorSelectOperator - native: true - projectedOutputColumnNums: [0, 5, 1, 2, 3, 4] - Statistics: Num rows: 6 Data size: 648 Basic stats: COMPLETE Column stats: PARTIAL - File Output Operator - compressed: false - File Sink Vectorization: - className: VectorFileSinkOperator - native: false - Statistics: Num rows: 6 Data size: 648 Basic stats: COMPLETE Column stats: PARTIAL - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - Execution mode: vectorized, llap - LLAP IO: all inputs - Map Vectorization: - enabled: true - enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true - inputFormatFeatureSupport: [] - featureSupportInUse: [] - inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat - allNative: false - usesVectorUDFAdaptor: false - vectorized: true - rowBatchContext: - dataColumnCount: 5 - includeColumns: [0, 1, 2, 3, 4] - dataColumns: insert_num:int, c1:float, c2:double, c3:double, b:string - partitionColumnCount: 1 - partitionColumns: part:int - scratchColumnTypeNames: [] - - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - ListSink - -PREHOOK: query: select insert_num,part,c1,c2,c3,b from part_change_lower_to_higher_numeric_group_decimal_to_float -PREHOOK: type: QUERY -PREHOOK: Input: default@part_change_lower_to_higher_numeric_group_decimal_to_float -PREHOOK: Input: default@part_change_lower_to_higher_numeric_group_decimal_to_float@part=1 -#### A masked pattern was here #### -POSTHOOK: query: select insert_num,part,c1,c2,c3,b from part_change_lower_to_higher_numeric_group_decimal_to_float -POSTHOOK: type: QUERY -POSTHOOK: Input: default@part_change_lower_to_higher_numeric_group_decimal_to_float -POSTHOOK: Input: default@part_change_lower_to_higher_numeric_group_decimal_to_float@part=1 -#### A masked pattern was here #### -insert_num part c1 c2 c3 b -101 1 1.0E20 1.0E20 Infinity original -102 1 -1.0E20 -1.0E20 -Infinity original -103 1 NULL NULL NULL original -104 1 66475.56 66475.561431 -100.35978 original -105 1 9250341.0 9250340.75 NULL original -111 1 1234.5677 9876.543 1234.5678 new -PREHOOK: query: drop table part_change_lower_to_higher_numeric_group_decimal_to_float -PREHOOK: type: DROPTABLE -PREHOOK: Input: default@part_change_lower_to_higher_numeric_group_decimal_to_float -PREHOOK: Output: default@part_change_lower_to_higher_numeric_group_decimal_to_float -POSTHOOK: query: drop table part_change_lower_to_higher_numeric_group_decimal_to_float -POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@part_change_lower_to_higher_numeric_group_decimal_to_float -POSTHOOK: Output: default@part_change_lower_to_higher_numeric_group_decimal_to_float diff --git ql/src/test/results/clientpositive/llap/singletsinsertorc.q.out ql/src/test/results/clientpositive/llap/singletsinsertorc.q.out deleted file mode 100644 index 0322eca245..0000000000 --- ql/src/test/results/clientpositive/llap/singletsinsertorc.q.out +++ /dev/null @@ -1,28 +0,0 @@ -PREHOOK: query: CREATE TABLE myorctable(ts timestamp) -STORED AS ORC -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@myorctable 
-POSTHOOK: query: CREATE TABLE myorctable(ts timestamp) -STORED AS ORC -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@myorctable -PREHOOK: query: INSERT INTO myorctable VALUES ('1970-01-01 00:00:00') -PREHOOK: type: QUERY -PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@myorctable -POSTHOOK: query: INSERT INTO myorctable VALUES ('1970-01-01 00:00:00') -POSTHOOK: type: QUERY -POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@myorctable -POSTHOOK: Lineage: myorctable.ts SCRIPT [] -PREHOOK: query: SELECT * FROM myorctable -PREHOOK: type: QUERY -PREHOOK: Input: default@myorctable -#### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM myorctable -POSTHOOK: type: QUERY -POSTHOOK: Input: default@myorctable -#### A masked pattern was here #### -1970-01-01 00:00:00 diff --git ql/src/test/results/clientpositive/llap/special_character_in_tabnames_1.q.out ql/src/test/results/clientpositive/llap/special_character_in_tabnames_1.q.out deleted file mode 100644 index 2faff5593f..0000000000 --- ql/src/test/results/clientpositive/llap/special_character_in_tabnames_1.q.out +++ /dev/null @@ -1,19416 +0,0 @@ -PREHOOK: query: create table `c/b/o_t1`(key string, value string, c_int int, c_float float, c_boolean boolean) partitioned by (dt string) row format delimited fields terminated by ',' STORED AS TEXTFILE -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@c/b/o_t1 -POSTHOOK: query: create table `c/b/o_t1`(key string, value string, c_int int, c_float float, c_boolean boolean) partitioned by (dt string) row format delimited fields terminated by ',' STORED AS TEXTFILE -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@c/b/o_t1 -PREHOOK: query: create table `//cbo_t2`(key string, value string, c_int int, c_float float, c_boolean boolean) partitioned by (dt string) row format delimited fields terminated by ',' STORED AS TEXTFILE -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@//cbo_t2 -POSTHOOK: query: create table `//cbo_t2`(key string, value string, c_int int, c_float float, c_boolean boolean) partitioned by (dt string) row format delimited fields terminated by ',' STORED AS TEXTFILE -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@//cbo_t2 -PREHOOK: query: create table `cbo_/t3////`(key string, value string, c_int int, c_float float, c_boolean boolean) row format delimited fields terminated by ',' STORED AS TEXTFILE -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@cbo_/t3//// -POSTHOOK: query: create table `cbo_/t3////`(key string, value string, c_int int, c_float float, c_boolean boolean) row format delimited fields terminated by ',' STORED AS TEXTFILE -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@cbo_/t3//// -PREHOOK: query: load data local inpath '../../data/files/cbo_t1.txt' into table `c/b/o_t1` partition (dt='2014') -PREHOOK: type: LOAD -#### A masked pattern was here #### -PREHOOK: Output: default@c/b/o_t1 -POSTHOOK: query: load data local inpath '../../data/files/cbo_t1.txt' into table `c/b/o_t1` partition (dt='2014') -POSTHOOK: type: LOAD -#### A masked pattern was here #### -POSTHOOK: Output: default@c/b/o_t1 -POSTHOOK: Output: default@c/b/o_t1@dt=2014 -PREHOOK: query: load data local inpath '../../data/files/cbo_t2.txt' into table `//cbo_t2` partition 
(dt='2014') -PREHOOK: type: LOAD -#### A masked pattern was here #### -PREHOOK: Output: default@//cbo_t2 -POSTHOOK: query: load data local inpath '../../data/files/cbo_t2.txt' into table `//cbo_t2` partition (dt='2014') -POSTHOOK: type: LOAD -#### A masked pattern was here #### -POSTHOOK: Output: default@//cbo_t2 -POSTHOOK: Output: default@//cbo_t2@dt=2014 -PREHOOK: query: load data local inpath '../../data/files/cbo_t3.txt' into table `cbo_/t3////` -PREHOOK: type: LOAD -#### A masked pattern was here #### -PREHOOK: Output: default@cbo_/t3//// -POSTHOOK: query: load data local inpath '../../data/files/cbo_t3.txt' into table `cbo_/t3////` -POSTHOOK: type: LOAD -#### A masked pattern was here #### -POSTHOOK: Output: default@cbo_/t3//// -PREHOOK: query: CREATE TABLE `p/a/r/t`( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@p/a/r/t -POSTHOOK: query: CREATE TABLE `p/a/r/t`( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@p/a/r/t -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table `p/a/r/t` -PREHOOK: type: LOAD -#### A masked pattern was here #### -PREHOOK: Output: default@p/a/r/t -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table `p/a/r/t` -POSTHOOK: type: LOAD -#### A masked pattern was here #### -POSTHOOK: Output: default@p/a/r/t -PREHOOK: query: CREATE TABLE `line/item` (L_ORDERKEY INT, - L_PARTKEY INT, - L_SUPPKEY INT, - L_LINENUMBER INT, - L_QUANTITY DOUBLE, - L_EXTENDEDPRICE DOUBLE, - L_DISCOUNT DOUBLE, - L_TAX DOUBLE, - L_RETURNFLAG STRING, - L_LINESTATUS STRING, - l_shipdate STRING, - L_COMMITDATE STRING, - L_RECEIPTDATE STRING, - L_SHIPINSTRUCT STRING, - L_SHIPMODE STRING, - L_COMMENT STRING) -ROW FORMAT DELIMITED -FIELDS TERMINATED BY '|' -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@line/item -POSTHOOK: query: CREATE TABLE `line/item` (L_ORDERKEY INT, - L_PARTKEY INT, - L_SUPPKEY INT, - L_LINENUMBER INT, - L_QUANTITY DOUBLE, - L_EXTENDEDPRICE DOUBLE, - L_DISCOUNT DOUBLE, - L_TAX DOUBLE, - L_RETURNFLAG STRING, - L_LINESTATUS STRING, - l_shipdate STRING, - L_COMMITDATE STRING, - L_RECEIPTDATE STRING, - L_SHIPINSTRUCT STRING, - L_SHIPMODE STRING, - L_COMMENT STRING) -ROW FORMAT DELIMITED -FIELDS TERMINATED BY '|' -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@line/item -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/lineitem.txt' OVERWRITE INTO TABLE `line/item` -PREHOOK: type: LOAD -#### A masked pattern was here #### -PREHOOK: Output: default@line/item -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/lineitem.txt' OVERWRITE INTO TABLE `line/item` -POSTHOOK: type: LOAD -#### A masked pattern was here #### -POSTHOOK: Output: default@line/item -PREHOOK: query: create table `src/_/cbo` as select * from src -PREHOOK: type: CREATETABLE_AS_SELECT -PREHOOK: Input: default@src -PREHOOK: Output: database:default -PREHOOK: Output: default@src/_/cbo -POSTHOOK: query: create table `src/_/cbo` as select * from src -POSTHOOK: type: CREATETABLE_AS_SELECT -POSTHOOK: Input: default@src -POSTHOOK: 
Output: database:default -POSTHOOK: Output: default@src/_/cbo -POSTHOOK: Lineage: src/_/cbo.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: src/_/cbo.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: analyze table `c/b/o_t1` partition (dt) compute statistics -PREHOOK: type: QUERY -PREHOOK: Input: default@c/b/o_t1 -PREHOOK: Input: default@c/b/o_t1@dt=2014 -PREHOOK: Output: default@c/b/o_t1 -PREHOOK: Output: default@c/b/o_t1@dt=2014 -POSTHOOK: query: analyze table `c/b/o_t1` partition (dt) compute statistics -POSTHOOK: type: QUERY -POSTHOOK: Input: default@c/b/o_t1 -POSTHOOK: Input: default@c/b/o_t1@dt=2014 -POSTHOOK: Output: default@c/b/o_t1 -POSTHOOK: Output: default@c/b/o_t1@dt=2014 -PREHOOK: query: analyze table `c/b/o_t1` compute statistics for columns key, value, c_int, c_float, c_boolean -PREHOOK: type: ANALYZE_TABLE -PREHOOK: Input: default@c/b/o_t1 -PREHOOK: Input: default@c/b/o_t1@dt=2014 -PREHOOK: Output: default@c/b/o_t1 -PREHOOK: Output: default@c/b/o_t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: analyze table `c/b/o_t1` compute statistics for columns key, value, c_int, c_float, c_boolean -POSTHOOK: type: ANALYZE_TABLE -POSTHOOK: Input: default@c/b/o_t1 -POSTHOOK: Input: default@c/b/o_t1@dt=2014 -POSTHOOK: Output: default@c/b/o_t1 -POSTHOOK: Output: default@c/b/o_t1@dt=2014 -#### A masked pattern was here #### -PREHOOK: query: analyze table `//cbo_t2` partition (dt) compute statistics -PREHOOK: type: QUERY -PREHOOK: Input: default@//cbo_t2 -PREHOOK: Input: default@//cbo_t2@dt=2014 -PREHOOK: Output: default@//cbo_t2 -PREHOOK: Output: default@//cbo_t2@dt=2014 -POSTHOOK: query: analyze table `//cbo_t2` partition (dt) compute statistics -POSTHOOK: type: QUERY -POSTHOOK: Input: default@//cbo_t2 -POSTHOOK: Input: default@//cbo_t2@dt=2014 -POSTHOOK: Output: default@//cbo_t2 -POSTHOOK: Output: default@//cbo_t2@dt=2014 -PREHOOK: query: analyze table `//cbo_t2` compute statistics for columns key, value, c_int, c_float, c_boolean -PREHOOK: type: ANALYZE_TABLE -PREHOOK: Input: default@//cbo_t2 -PREHOOK: Input: default@//cbo_t2@dt=2014 -PREHOOK: Output: default@//cbo_t2 -PREHOOK: Output: default@//cbo_t2@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: analyze table `//cbo_t2` compute statistics for columns key, value, c_int, c_float, c_boolean -POSTHOOK: type: ANALYZE_TABLE -POSTHOOK: Input: default@//cbo_t2 -POSTHOOK: Input: default@//cbo_t2@dt=2014 -POSTHOOK: Output: default@//cbo_t2 -POSTHOOK: Output: default@//cbo_t2@dt=2014 -#### A masked pattern was here #### -PREHOOK: query: analyze table `cbo_/t3////` compute statistics -PREHOOK: type: QUERY -PREHOOK: Input: default@cbo_/t3//// -PREHOOK: Output: default@cbo_/t3//// -POSTHOOK: query: analyze table `cbo_/t3////` compute statistics -POSTHOOK: type: QUERY -POSTHOOK: Input: default@cbo_/t3//// -POSTHOOK: Output: default@cbo_/t3//// -PREHOOK: query: analyze table `cbo_/t3////` compute statistics for columns key, value, c_int, c_float, c_boolean -PREHOOK: type: ANALYZE_TABLE -PREHOOK: Input: default@cbo_/t3//// -PREHOOK: Output: default@cbo_/t3//// -#### A masked pattern was here #### -POSTHOOK: query: analyze table `cbo_/t3////` compute statistics for columns key, value, c_int, c_float, c_boolean -POSTHOOK: type: ANALYZE_TABLE -POSTHOOK: Input: default@cbo_/t3//// -POSTHOOK: Output: default@cbo_/t3//// -#### A masked pattern was here #### -PREHOOK: query: analyze table `src/_/cbo` compute statistics -PREHOOK: 
type: QUERY -PREHOOK: Input: default@src/_/cbo -PREHOOK: Output: default@src/_/cbo -POSTHOOK: query: analyze table `src/_/cbo` compute statistics -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src/_/cbo -POSTHOOK: Output: default@src/_/cbo -PREHOOK: query: analyze table `src/_/cbo` compute statistics for columns -PREHOOK: type: ANALYZE_TABLE -PREHOOK: Input: default@src/_/cbo -PREHOOK: Output: default@src/_/cbo -#### A masked pattern was here #### -POSTHOOK: query: analyze table `src/_/cbo` compute statistics for columns -POSTHOOK: type: ANALYZE_TABLE -POSTHOOK: Input: default@src/_/cbo -POSTHOOK: Output: default@src/_/cbo -#### A masked pattern was here #### -PREHOOK: query: analyze table `p/a/r/t` compute statistics -PREHOOK: type: QUERY -PREHOOK: Input: default@p/a/r/t -PREHOOK: Output: default@p/a/r/t -POSTHOOK: query: analyze table `p/a/r/t` compute statistics -POSTHOOK: type: QUERY -POSTHOOK: Input: default@p/a/r/t -POSTHOOK: Output: default@p/a/r/t -PREHOOK: query: analyze table `p/a/r/t` compute statistics for columns -PREHOOK: type: ANALYZE_TABLE -PREHOOK: Input: default@p/a/r/t -PREHOOK: Output: default@p/a/r/t -#### A masked pattern was here #### -POSTHOOK: query: analyze table `p/a/r/t` compute statistics for columns -POSTHOOK: type: ANALYZE_TABLE -POSTHOOK: Input: default@p/a/r/t -POSTHOOK: Output: default@p/a/r/t -#### A masked pattern was here #### -PREHOOK: query: analyze table `line/item` compute statistics -PREHOOK: type: QUERY -PREHOOK: Input: default@line/item -PREHOOK: Output: default@line/item -POSTHOOK: query: analyze table `line/item` compute statistics -POSTHOOK: type: QUERY -POSTHOOK: Input: default@line/item -POSTHOOK: Output: default@line/item -PREHOOK: query: analyze table `line/item` compute statistics for columns -PREHOOK: type: ANALYZE_TABLE -PREHOOK: Input: default@line/item -PREHOOK: Output: default@line/item -#### A masked pattern was here #### -POSTHOOK: query: analyze table `line/item` compute statistics for columns -POSTHOOK: type: ANALYZE_TABLE -POSTHOOK: Input: default@line/item -POSTHOOK: Output: default@line/item -#### A masked pattern was here #### -PREHOOK: query: select key, (c_int+1)+2 as x, sum(c_int) from `c/b/o_t1` group by c_float, `c/b/o_t1`.c_int, key -PREHOOK: type: QUERY -PREHOOK: Input: default@c/b/o_t1 -PREHOOK: Input: default@c/b/o_t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: select key, (c_int+1)+2 as x, sum(c_int) from `c/b/o_t1` group by c_float, `c/b/o_t1`.c_int, key -POSTHOOK: type: QUERY -POSTHOOK: Input: default@c/b/o_t1 -POSTHOOK: Input: default@c/b/o_t1@dt=2014 -#### A masked pattern was here #### - 1 4 2 - 1 4 2 -1 4 12 -1 4 2 -NULL NULL NULL -PREHOOK: query: select x, y, count(*) from (select key, (c_int+c_float+1+2) as x, sum(c_int) as y from `c/b/o_t1` group by c_float, `c/b/o_t1`.c_int, key) R group by y, x -PREHOOK: type: QUERY -PREHOOK: Input: default@c/b/o_t1 -PREHOOK: Input: default@c/b/o_t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: select x, y, count(*) from (select key, (c_int+c_float+1+2) as x, sum(c_int) as y from `c/b/o_t1` group by c_float, `c/b/o_t1`.c_int, key) R group by y, x -POSTHOOK: type: QUERY -POSTHOOK: Input: default@c/b/o_t1 -POSTHOOK: Input: default@c/b/o_t1@dt=2014 -#### A masked pattern was here #### -5.0 12 1 -5.0 2 3 -NULL NULL 1 -PREHOOK: query: select `cbo_/t3////`.c_int, c, count(*) from (select key as a, c_int+1 as b, sum(c_int) as c from `c/b/o_t1` where (`c/b/o_t1`.c_int + 1 >= 0) and (`c/b/o_t1`.c_int > 0 or `c/b/o_t1`.c_float >= 0) group by 
c_float, `c/b/o_t1`.c_int, key order by a) `c/b/o_t1` join (select key as p, c_int+1 as q, sum(c_int) as r from `//cbo_t2` where (`//cbo_t2`.c_int + 1 >= 0) and (`//cbo_t2`.c_int > 0 or `//cbo_t2`.c_float >= 0) group by c_float, `//cbo_t2`.c_int, key order by q/10 desc, r asc) `//cbo_t2` on `c/b/o_t1`.a=p join `cbo_/t3////` on `c/b/o_t1`.a=key where (b + `//cbo_t2`.q >= 0) and (b > 0 or c_int >= 0) group by `cbo_/t3////`.c_int, c order by `cbo_/t3////`.c_int+c desc, c -PREHOOK: type: QUERY -PREHOOK: Input: default@//cbo_t2 -PREHOOK: Input: default@//cbo_t2@dt=2014 -PREHOOK: Input: default@c/b/o_t1 -PREHOOK: Input: default@c/b/o_t1@dt=2014 -PREHOOK: Input: default@cbo_/t3//// -#### A masked pattern was here #### -POSTHOOK: query: select `cbo_/t3////`.c_int, c, count(*) from (select key as a, c_int+1 as b, sum(c_int) as c from `c/b/o_t1` where (`c/b/o_t1`.c_int + 1 >= 0) and (`c/b/o_t1`.c_int > 0 or `c/b/o_t1`.c_float >= 0) group by c_float, `c/b/o_t1`.c_int, key order by a) `c/b/o_t1` join (select key as p, c_int+1 as q, sum(c_int) as r from `//cbo_t2` where (`//cbo_t2`.c_int + 1 >= 0) and (`//cbo_t2`.c_int > 0 or `//cbo_t2`.c_float >= 0) group by c_float, `//cbo_t2`.c_int, key order by q/10 desc, r asc) `//cbo_t2` on `c/b/o_t1`.a=p join `cbo_/t3////` on `c/b/o_t1`.a=key where (b + `//cbo_t2`.q >= 0) and (b > 0 or c_int >= 0) group by `cbo_/t3////`.c_int, c order by `cbo_/t3////`.c_int+c desc, c -POSTHOOK: type: QUERY -POSTHOOK: Input: default@//cbo_t2 -POSTHOOK: Input: default@//cbo_t2@dt=2014 -POSTHOOK: Input: default@c/b/o_t1 -POSTHOOK: Input: default@c/b/o_t1@dt=2014 -POSTHOOK: Input: default@cbo_/t3//// -#### A masked pattern was here #### -1 12 6 -1 2 6 -PREHOOK: query: select `cbo_/t3////`.c_int, c, count(*) from (select key as a, c_int+1 as b, sum(c_int) as c from `c/b/o_t1` where (`c/b/o_t1`.c_int + 1 >= 0) and (`c/b/o_t1`.c_int > 0 or `c/b/o_t1`.c_float >= 0) group by c_float, `c/b/o_t1`.c_int, key having `c/b/o_t1`.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by b % c asc, b desc) `c/b/o_t1` left outer join (select key as p, c_int+1 as q, sum(c_int) as r from `//cbo_t2` where (`//cbo_t2`.c_int + 1 >= 0) and (`//cbo_t2`.c_int > 0 or `//cbo_t2`.c_float >= 0) group by c_float, `//cbo_t2`.c_int, key having `//cbo_t2`.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0) `//cbo_t2` on `c/b/o_t1`.a=p left outer join `cbo_/t3////` on `c/b/o_t1`.a=key where (b + `//cbo_t2`.q >= 0) and (b > 0 or c_int >= 0) group by `cbo_/t3////`.c_int, c having `cbo_/t3////`.c_int > 0 and (c_int >=1 or c >= 1) and (c_int + c) >= 0 order by `cbo_/t3////`.c_int % c asc, `cbo_/t3////`.c_int desc -PREHOOK: type: QUERY -PREHOOK: Input: default@//cbo_t2 -PREHOOK: Input: default@//cbo_t2@dt=2014 -PREHOOK: Input: default@c/b/o_t1 -PREHOOK: Input: default@c/b/o_t1@dt=2014 -PREHOOK: Input: default@cbo_/t3//// -#### A masked pattern was here #### -POSTHOOK: query: select `cbo_/t3////`.c_int, c, count(*) from (select key as a, c_int+1 as b, sum(c_int) as c from `c/b/o_t1` where (`c/b/o_t1`.c_int + 1 >= 0) and (`c/b/o_t1`.c_int > 0 or `c/b/o_t1`.c_float >= 0) group by c_float, `c/b/o_t1`.c_int, key having `c/b/o_t1`.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by b % c asc, b desc) `c/b/o_t1` left outer join (select key as p, c_int+1 as q, sum(c_int) as r from `//cbo_t2` where (`//cbo_t2`.c_int + 1 >= 0) and (`//cbo_t2`.c_int > 0 or `//cbo_t2`.c_float >= 0) group by c_float, `//cbo_t2`.c_int, key having `//cbo_t2`.c_float > 0 and (c_int 
>=1 or c_float >= 1) and (c_int + c_float) >= 0) `//cbo_t2` on `c/b/o_t1`.a=p left outer join `cbo_/t3////` on `c/b/o_t1`.a=key where (b + `//cbo_t2`.q >= 0) and (b > 0 or c_int >= 0) group by `cbo_/t3////`.c_int, c having `cbo_/t3////`.c_int > 0 and (c_int >=1 or c >= 1) and (c_int + c) >= 0 order by `cbo_/t3////`.c_int % c asc, `cbo_/t3////`.c_int desc -POSTHOOK: type: QUERY -POSTHOOK: Input: default@//cbo_t2 -POSTHOOK: Input: default@//cbo_t2@dt=2014 -POSTHOOK: Input: default@c/b/o_t1 -POSTHOOK: Input: default@c/b/o_t1@dt=2014 -POSTHOOK: Input: default@cbo_/t3//// -#### A masked pattern was here #### -1 12 6 -1 2 6 -PREHOOK: query: select `cbo_/t3////`.c_int, c, count(*) from (select key as a, c_int+1 as b, sum(c_int) as c from `c/b/o_t1` where (`c/b/o_t1`.c_int + 1 >= 0) and (`c/b/o_t1`.c_int > 0 or `c/b/o_t1`.c_float >= 0) group by c_float, `c/b/o_t1`.c_int, key having `c/b/o_t1`.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by b+c, a desc) `c/b/o_t1` right outer join (select key as p, c_int+1 as q, sum(c_int) as r from `//cbo_t2` where (`//cbo_t2`.c_int + 1 >= 0) and (`//cbo_t2`.c_int > 0 or `//cbo_t2`.c_float >= 0) group by c_float, `//cbo_t2`.c_int, key having `//cbo_t2`.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0) `//cbo_t2` on `c/b/o_t1`.a=p right outer join `cbo_/t3////` on `c/b/o_t1`.a=key where (b + `//cbo_t2`.q >= 2) and (b > 0 or c_int >= 0) group by `cbo_/t3////`.c_int, c -PREHOOK: type: QUERY -PREHOOK: Input: default@//cbo_t2 -PREHOOK: Input: default@//cbo_t2@dt=2014 -PREHOOK: Input: default@c/b/o_t1 -PREHOOK: Input: default@c/b/o_t1@dt=2014 -PREHOOK: Input: default@cbo_/t3//// -#### A masked pattern was here #### -POSTHOOK: query: select `cbo_/t3////`.c_int, c, count(*) from (select key as a, c_int+1 as b, sum(c_int) as c from `c/b/o_t1` where (`c/b/o_t1`.c_int + 1 >= 0) and (`c/b/o_t1`.c_int > 0 or `c/b/o_t1`.c_float >= 0) group by c_float, `c/b/o_t1`.c_int, key having `c/b/o_t1`.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by b+c, a desc) `c/b/o_t1` right outer join (select key as p, c_int+1 as q, sum(c_int) as r from `//cbo_t2` where (`//cbo_t2`.c_int + 1 >= 0) and (`//cbo_t2`.c_int > 0 or `//cbo_t2`.c_float >= 0) group by c_float, `//cbo_t2`.c_int, key having `//cbo_t2`.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0) `//cbo_t2` on `c/b/o_t1`.a=p right outer join `cbo_/t3////` on `c/b/o_t1`.a=key where (b + `//cbo_t2`.q >= 2) and (b > 0 or c_int >= 0) group by `cbo_/t3////`.c_int, c -POSTHOOK: type: QUERY -POSTHOOK: Input: default@//cbo_t2 -POSTHOOK: Input: default@//cbo_t2@dt=2014 -POSTHOOK: Input: default@c/b/o_t1 -POSTHOOK: Input: default@c/b/o_t1@dt=2014 -POSTHOOK: Input: default@cbo_/t3//// -#### A masked pattern was here #### -1 12 6 -1 2 6 -PREHOOK: query: select `cbo_/t3////`.c_int, c, count(*) from (select key as a, c_int+1 as b, sum(c_int) as c from `c/b/o_t1` where (`c/b/o_t1`.c_int + 1 >= 0) and (`c/b/o_t1`.c_int > 0 or `c/b/o_t1`.c_float >= 0) group by c_float, `c/b/o_t1`.c_int, key having `c/b/o_t1`.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by c+a desc) `c/b/o_t1` full outer join (select key as p, c_int+1 as q, sum(c_int) as r from `//cbo_t2` where (`//cbo_t2`.c_int + 1 >= 0) and (`//cbo_t2`.c_int > 0 or `//cbo_t2`.c_float >= 0) group by c_float, `//cbo_t2`.c_int, key having `//cbo_t2`.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by p+q desc, r asc) `//cbo_t2` on `c/b/o_t1`.a=p 
full outer join `cbo_/t3////` on `c/b/o_t1`.a=key where (b + `//cbo_t2`.q >= 0) and (b > 0 or c_int >= 0) group by `cbo_/t3////`.c_int, c having `cbo_/t3////`.c_int > 0 and (c_int >=1 or c >= 1) and (c_int + c) >= 0 order by `cbo_/t3////`.c_int -PREHOOK: type: QUERY -PREHOOK: Input: default@//cbo_t2 -PREHOOK: Input: default@//cbo_t2@dt=2014 -PREHOOK: Input: default@c/b/o_t1 -PREHOOK: Input: default@c/b/o_t1@dt=2014 -PREHOOK: Input: default@cbo_/t3//// -#### A masked pattern was here #### -POSTHOOK: query: select `cbo_/t3////`.c_int, c, count(*) from (select key as a, c_int+1 as b, sum(c_int) as c from `c/b/o_t1` where (`c/b/o_t1`.c_int + 1 >= 0) and (`c/b/o_t1`.c_int > 0 or `c/b/o_t1`.c_float >= 0) group by c_float, `c/b/o_t1`.c_int, key having `c/b/o_t1`.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by c+a desc) `c/b/o_t1` full outer join (select key as p, c_int+1 as q, sum(c_int) as r from `//cbo_t2` where (`//cbo_t2`.c_int + 1 >= 0) and (`//cbo_t2`.c_int > 0 or `//cbo_t2`.c_float >= 0) group by c_float, `//cbo_t2`.c_int, key having `//cbo_t2`.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by p+q desc, r asc) `//cbo_t2` on `c/b/o_t1`.a=p full outer join `cbo_/t3////` on `c/b/o_t1`.a=key where (b + `//cbo_t2`.q >= 0) and (b > 0 or c_int >= 0) group by `cbo_/t3////`.c_int, c having `cbo_/t3////`.c_int > 0 and (c_int >=1 or c >= 1) and (c_int + c) >= 0 order by `cbo_/t3////`.c_int -POSTHOOK: type: QUERY -POSTHOOK: Input: default@//cbo_t2 -POSTHOOK: Input: default@//cbo_t2@dt=2014 -POSTHOOK: Input: default@c/b/o_t1 -POSTHOOK: Input: default@c/b/o_t1@dt=2014 -POSTHOOK: Input: default@cbo_/t3//// -#### A masked pattern was here #### -1 12 6 -1 2 6 -PREHOOK: query: select `cbo_/t3////`.c_int, c, count(*) from (select key as a, c_int+1 as b, sum(c_int) as c from `c/b/o_t1` where (`c/b/o_t1`.c_int + 1 >= 0) and (`c/b/o_t1`.c_int > 0 or `c/b/o_t1`.c_float >= 0) group by c_float, `c/b/o_t1`.c_int, key having `c/b/o_t1`.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0) `c/b/o_t1` join (select key as p, c_int+1 as q, sum(c_int) as r from `//cbo_t2` where (`//cbo_t2`.c_int + 1 >= 0) and (`//cbo_t2`.c_int > 0 or `//cbo_t2`.c_float >= 0) group by c_float, `//cbo_t2`.c_int, key having `//cbo_t2`.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0) `//cbo_t2` on `c/b/o_t1`.a=p join `cbo_/t3////` on `c/b/o_t1`.a=key where (b + `//cbo_t2`.q >= 0) and (b > 0 or c_int >= 0) group by `cbo_/t3////`.c_int, c -PREHOOK: type: QUERY -PREHOOK: Input: default@//cbo_t2 -PREHOOK: Input: default@//cbo_t2@dt=2014 -PREHOOK: Input: default@c/b/o_t1 -PREHOOK: Input: default@c/b/o_t1@dt=2014 -PREHOOK: Input: default@cbo_/t3//// -#### A masked pattern was here #### -POSTHOOK: query: select `cbo_/t3////`.c_int, c, count(*) from (select key as a, c_int+1 as b, sum(c_int) as c from `c/b/o_t1` where (`c/b/o_t1`.c_int + 1 >= 0) and (`c/b/o_t1`.c_int > 0 or `c/b/o_t1`.c_float >= 0) group by c_float, `c/b/o_t1`.c_int, key having `c/b/o_t1`.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0) `c/b/o_t1` join (select key as p, c_int+1 as q, sum(c_int) as r from `//cbo_t2` where (`//cbo_t2`.c_int + 1 >= 0) and (`//cbo_t2`.c_int > 0 or `//cbo_t2`.c_float >= 0) group by c_float, `//cbo_t2`.c_int, key having `//cbo_t2`.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0) `//cbo_t2` on `c/b/o_t1`.a=p join `cbo_/t3////` on `c/b/o_t1`.a=key where (b + `//cbo_t2`.q >= 0) and (b > 0 or c_int >= 0) group by 
`cbo_/t3////`.c_int, c -POSTHOOK: type: QUERY -POSTHOOK: Input: default@//cbo_t2 -POSTHOOK: Input: default@//cbo_t2@dt=2014 -POSTHOOK: Input: default@c/b/o_t1 -POSTHOOK: Input: default@c/b/o_t1@dt=2014 -POSTHOOK: Input: default@cbo_/t3//// -#### A masked pattern was here #### -1 12 6 -1 2 6 -PREHOOK: query: select unionsrc.key FROM (select 'tst1' as key, count(1) as value from src) unionsrc -PREHOOK: type: QUERY -PREHOOK: Input: default@src -#### A masked pattern was here #### -POSTHOOK: query: select unionsrc.key FROM (select 'tst1' as key, count(1) as value from src) unionsrc -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src -#### A masked pattern was here #### -tst1 -PREHOOK: query: select unionsrc.key, unionsrc.value FROM (select 'tst1' as key, count(1) as value from src) unionsrc -PREHOOK: type: QUERY -PREHOOK: Input: default@src -#### A masked pattern was here #### -POSTHOOK: query: select unionsrc.key, unionsrc.value FROM (select 'tst1' as key, count(1) as value from src) unionsrc -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src -#### A masked pattern was here #### -tst1 500 -PREHOOK: query: select unionsrc.key FROM (select 'max' as key, max(c_int) as value from `cbo_/t3////` s1 - -UNION ALL - - select 'min' as key, min(c_int) as value from `cbo_/t3////` s2 - - UNION ALL - - select 'avg' as key, avg(c_int) as value from `cbo_/t3////` s3) unionsrc order by unionsrc.key -PREHOOK: type: QUERY -PREHOOK: Input: default@cbo_/t3//// -#### A masked pattern was here #### -POSTHOOK: query: select unionsrc.key FROM (select 'max' as key, max(c_int) as value from `cbo_/t3////` s1 - -UNION ALL - - select 'min' as key, min(c_int) as value from `cbo_/t3////` s2 - - UNION ALL - - select 'avg' as key, avg(c_int) as value from `cbo_/t3////` s3) unionsrc order by unionsrc.key -POSTHOOK: type: QUERY -POSTHOOK: Input: default@cbo_/t3//// -#### A masked pattern was here #### -avg -max -min -PREHOOK: query: select unionsrc.key, unionsrc.value FROM (select 'max' as key, max(c_int) as value from `cbo_/t3////` s1 - -UNION ALL - - select 'min' as key, min(c_int) as value from `cbo_/t3////` s2 - - UNION ALL - - select 'avg' as key, avg(c_int) as value from `cbo_/t3////` s3) unionsrc order by unionsrc.key -PREHOOK: type: QUERY -PREHOOK: Input: default@cbo_/t3//// -#### A masked pattern was here #### -POSTHOOK: query: select unionsrc.key, unionsrc.value FROM (select 'max' as key, max(c_int) as value from `cbo_/t3////` s1 - -UNION ALL - - select 'min' as key, min(c_int) as value from `cbo_/t3////` s2 - - UNION ALL - - select 'avg' as key, avg(c_int) as value from `cbo_/t3////` s3) unionsrc order by unionsrc.key -POSTHOOK: type: QUERY -POSTHOOK: Input: default@cbo_/t3//// -#### A masked pattern was here #### -avg 1.5 -max 3.0 -min 1.0 -PREHOOK: query: select unionsrc.key, count(1) FROM (select 'max' as key, max(c_int) as value from `cbo_/t3////` s1 - - UNION ALL - - select 'min' as key, min(c_int) as value from `cbo_/t3////` s2 - - UNION ALL - - select 'avg' as key, avg(c_int) as value from `cbo_/t3////` s3) unionsrc group by unionsrc.key order by unionsrc.key -PREHOOK: type: QUERY -PREHOOK: Input: default@cbo_/t3//// -#### A masked pattern was here #### -POSTHOOK: query: select unionsrc.key, count(1) FROM (select 'max' as key, max(c_int) as value from `cbo_/t3////` s1 - - UNION ALL - - select 'min' as key, min(c_int) as value from `cbo_/t3////` s2 - - UNION ALL - - select 'avg' as key, avg(c_int) as value from `cbo_/t3////` s3) unionsrc group by unionsrc.key order by unionsrc.key -POSTHOOK: type: QUERY 
-POSTHOOK: Input: default@cbo_/t3//// -#### A masked pattern was here #### -avg 1 -max 1 -min 1 -PREHOOK: query: select `c/b/o_t1`.c_int, `//cbo_t2`.c_int from `c/b/o_t1` join `//cbo_t2` on `c/b/o_t1`.key=`//cbo_t2`.key -PREHOOK: type: QUERY -PREHOOK: Input: default@//cbo_t2 -PREHOOK: Input: default@//cbo_t2@dt=2014 -PREHOOK: Input: default@c/b/o_t1 -PREHOOK: Input: default@c/b/o_t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: select `c/b/o_t1`.c_int, `//cbo_t2`.c_int from `c/b/o_t1` join `//cbo_t2` on `c/b/o_t1`.key=`//cbo_t2`.key -POSTHOOK: type: QUERY -POSTHOOK: Input: default@//cbo_t2 -POSTHOOK: Input: default@//cbo_t2@dt=2014 -POSTHOOK: Input: default@c/b/o_t1 -POSTHOOK: Input: default@c/b/o_t1@dt=2014 -#### A masked pattern was here #### -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -PREHOOK: query: select `c/b/o_t1`.key from `c/b/o_t1` join `cbo_/t3////` -PREHOOK: type: QUERY -PREHOOK: Input: default@c/b/o_t1 -PREHOOK: Input: default@c/b/o_t1@dt=2014 -PREHOOK: Input: default@cbo_/t3//// -#### A masked pattern was here #### -POSTHOOK: query: select `c/b/o_t1`.key from `c/b/o_t1` join `cbo_/t3////` -POSTHOOK: type: QUERY -POSTHOOK: Input: default@c/b/o_t1 -POSTHOOK: Input: default@c/b/o_t1@dt=2014 -POSTHOOK: Input: default@cbo_/t3//// -#### A masked pattern was here #### - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -NULL -NULL -NULL -NULL -NULL -NULL -NULL -NULL -NULL -NULL -NULL -NULL -NULL -NULL -NULL -NULL -NULL -NULL -NULL -NULL -NULL -NULL -NULL -NULL -NULL -NULL -NULL -NULL -NULL -NULL -NULL -NULL -NULL -NULL -NULL -NULL -NULL -NULL -NULL -NULL -PREHOOK: query: select `c/b/o_t1`.key from `c/b/o_t1` join `cbo_/t3////` where `c/b/o_t1`.key=`cbo_/t3////`.key and `c/b/o_t1`.key >= 1 -PREHOOK: type: QUERY -PREHOOK: Input: default@c/b/o_t1 -PREHOOK: Input: default@c/b/o_t1@dt=2014 -PREHOOK: Input: default@cbo_/t3//// -#### A masked pattern was here #### -POSTHOOK: query: select `c/b/o_t1`.key from `c/b/o_t1` join `cbo_/t3////` where 
`c/b/o_t1`.key=`cbo_/t3////`.key and `c/b/o_t1`.key >= 1 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@c/b/o_t1 -POSTHOOK: Input: default@c/b/o_t1@dt=2014 -POSTHOOK: Input: default@cbo_/t3//// -#### A masked pattern was here #### - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -PREHOOK: query: select `c/b/o_t1`.c_int, `//cbo_t2`.c_int from `c/b/o_t1` left outer join `//cbo_t2` on `c/b/o_t1`.key=`//cbo_t2`.key -PREHOOK: type: QUERY -PREHOOK: Input: default@//cbo_t2 -PREHOOK: Input: default@//cbo_t2@dt=2014 -PREHOOK: Input: default@c/b/o_t1 -PREHOOK: Input: default@c/b/o_t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: select `c/b/o_t1`.c_int, `//cbo_t2`.c_int from `c/b/o_t1` left outer join `//cbo_t2` on `c/b/o_t1`.key=`//cbo_t2`.key -POSTHOOK: type: QUERY -POSTHOOK: Input: default@//cbo_t2 -POSTHOOK: Input: default@//cbo_t2@dt=2014 -POSTHOOK: Input: default@c/b/o_t1 -POSTHOOK: Input: default@c/b/o_t1@dt=2014 -#### A masked pattern was here #### -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -NULL NULL -NULL NULL -PREHOOK: query: select `c/b/o_t1`.c_int, `//cbo_t2`.c_int from `c/b/o_t1` right outer join `//cbo_t2` on `c/b/o_t1`.key=`//cbo_t2`.key -PREHOOK: type: QUERY -PREHOOK: Input: default@//cbo_t2 -PREHOOK: Input: default@//cbo_t2@dt=2014 -PREHOOK: Input: default@c/b/o_t1 -PREHOOK: Input: default@c/b/o_t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: select `c/b/o_t1`.c_int, `//cbo_t2`.c_int from `c/b/o_t1` right outer join `//cbo_t2` on `c/b/o_t1`.key=`//cbo_t2`.key -POSTHOOK: type: QUERY -POSTHOOK: Input: default@//cbo_t2 -POSTHOOK: Input: default@//cbo_t2@dt=2014 -POSTHOOK: Input: default@c/b/o_t1 -POSTHOOK: Input: default@c/b/o_t1@dt=2014 -#### A masked pattern was here #### -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -NULL 2 -NULL 2 -NULL 2 -NULL 2 -NULL 2 -NULL NULL -NULL NULL -PREHOOK: query: select `c/b/o_t1`.c_int, `//cbo_t2`.c_int from `c/b/o_t1` full outer join `//cbo_t2` on `c/b/o_t1`.key=`//cbo_t2`.key -PREHOOK: type: QUERY -PREHOOK: Input: default@//cbo_t2 -PREHOOK: Input: default@//cbo_t2@dt=2014 -PREHOOK: Input: default@c/b/o_t1 -PREHOOK: Input: default@c/b/o_t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: select `c/b/o_t1`.c_int, `//cbo_t2`.c_int from `c/b/o_t1` full outer join `//cbo_t2` on `c/b/o_t1`.key=`//cbo_t2`.key -POSTHOOK: type: QUERY -POSTHOOK: Input: default@//cbo_t2 -POSTHOOK: Input: default@//cbo_t2@dt=2014 -POSTHOOK: Input: default@c/b/o_t1 
-POSTHOOK: Input: default@c/b/o_t1@dt=2014 -#### A masked pattern was here #### -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -NULL 2 -NULL 2 -NULL 2 -NULL 2 -NULL 2 -NULL NULL -NULL NULL -NULL NULL -NULL NULL -PREHOOK: query: select b, `c/b/o_t1`.c, `//cbo_t2`.p, q, `cbo_/t3////`.c_int from (select key as a, c_int as b, `c/b/o_t1`.c_float as c from `c/b/o_t1`) `c/b/o_t1` join (select `//cbo_t2`.key as p, `//cbo_t2`.c_int as q, c_float as r from `//cbo_t2`) `//cbo_t2` on `c/b/o_t1`.a=p join `cbo_/t3////` on `c/b/o_t1`.a=key -PREHOOK: type: QUERY -PREHOOK: Input: default@//cbo_t2 -PREHOOK: Input: default@//cbo_t2@dt=2014 -PREHOOK: Input: default@c/b/o_t1 -PREHOOK: Input: default@c/b/o_t1@dt=2014 -PREHOOK: Input: default@cbo_/t3//// -#### A masked pattern was here #### -POSTHOOK: query: select b, `c/b/o_t1`.c, `//cbo_t2`.p, q, `cbo_/t3////`.c_int from (select key as a, c_int as b, `c/b/o_t1`.c_float as c from `c/b/o_t1`) `c/b/o_t1` join (select `//cbo_t2`.key as p, `//cbo_t2`.c_int as q, c_float as r from `//cbo_t2`) `//cbo_t2` on `c/b/o_t1`.a=p join `cbo_/t3////` on `c/b/o_t1`.a=key -POSTHOOK: type: QUERY -POSTHOOK: Input: default@//cbo_t2 -POSTHOOK: Input: default@//cbo_t2@dt=2014 -POSTHOOK: Input: default@c/b/o_t1 -POSTHOOK: Input: default@c/b/o_t1@dt=2014 -POSTHOOK: Input: default@cbo_/t3//// -#### A masked pattern was here #### -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 
-1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 
1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -PREHOOK: query: select key, `c/b/o_t1`.c_int, `//cbo_t2`.p, q from `c/b/o_t1` join (select `//cbo_t2`.key as p, `//cbo_t2`.c_int as q, c_float as r from `//cbo_t2`) `//cbo_t2` on `c/b/o_t1`.key=p join (select key as a, c_int as b, `cbo_/t3////`.c_float as c from `cbo_/t3////`)`cbo_/t3////` on `c/b/o_t1`.key=a -PREHOOK: type: QUERY -PREHOOK: Input: default@//cbo_t2 -PREHOOK: Input: default@//cbo_t2@dt=2014 -PREHOOK: Input: default@c/b/o_t1 -PREHOOK: Input: default@c/b/o_t1@dt=2014 -PREHOOK: Input: default@cbo_/t3//// -#### A masked pattern was here #### -POSTHOOK: query: select key, `c/b/o_t1`.c_int, `//cbo_t2`.p, q from `c/b/o_t1` join (select `//cbo_t2`.key as p, `//cbo_t2`.c_int as q, c_float as r from `//cbo_t2`) `//cbo_t2` on `c/b/o_t1`.key=p join (select key as a, c_int as b, `cbo_/t3////`.c_float as c from `cbo_/t3////`)`cbo_/t3////` on `c/b/o_t1`.key=a -POSTHOOK: type: QUERY -POSTHOOK: Input: default@//cbo_t2 -POSTHOOK: Input: default@//cbo_t2@dt=2014 -POSTHOOK: Input: default@c/b/o_t1 -POSTHOOK: Input: default@c/b/o_t1@dt=2014 -POSTHOOK: Input: default@cbo_/t3//// -#### A masked pattern was here #### - 1 1 1 1 - 1 1 1 1 - 1 1 1 1 - 1 1 1 1 - 1 1 1 1 - 1 1 1 1 - 1 1 1 1 - 1 1 1 1 - 1 1 1 1 - 1 1 1 1 - 1 1 1 1 - 1 1 1 1 - 1 1 1 1 - 1 1 1 1 - 1 1 1 1 - 1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 
1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 
1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -PREHOOK: query: select a, `c/b/o_t1`.b, key, `//cbo_t2`.c_int, `cbo_/t3////`.p from (select key as a, c_int as b, `c/b/o_t1`.c_float as c from `c/b/o_t1`) `c/b/o_t1` join `//cbo_t2` on `c/b/o_t1`.a=key join (select key as p, c_int as q, `cbo_/t3////`.c_float as r from `cbo_/t3////`)`cbo_/t3////` on `c/b/o_t1`.a=`cbo_/t3////`.p -PREHOOK: type: QUERY -PREHOOK: Input: default@//cbo_t2 -PREHOOK: Input: default@//cbo_t2@dt=2014 -PREHOOK: Input: default@c/b/o_t1 -PREHOOK: Input: default@c/b/o_t1@dt=2014 -PREHOOK: Input: default@cbo_/t3//// -#### A masked pattern was here #### -POSTHOOK: query: select a, `c/b/o_t1`.b, key, `//cbo_t2`.c_int, `cbo_/t3////`.p from (select key as a, c_int as b, `c/b/o_t1`.c_float as c from `c/b/o_t1`) `c/b/o_t1` join `//cbo_t2` on `c/b/o_t1`.a=key join (select key as p, c_int as q, `cbo_/t3////`.c_float as r from `cbo_/t3////`)`cbo_/t3////` on `c/b/o_t1`.a=`cbo_/t3////`.p -POSTHOOK: type: QUERY -POSTHOOK: Input: default@//cbo_t2 -POSTHOOK: Input: default@//cbo_t2@dt=2014 -POSTHOOK: Input: default@c/b/o_t1 -POSTHOOK: Input: default@c/b/o_t1@dt=2014 -POSTHOOK: Input: default@cbo_/t3//// -#### A masked pattern was here #### - 1 1 1 1 1 - 1 1 1 1 1 - 1 1 1 1 1 - 1 1 1 1 1 - 1 1 1 1 1 - 1 1 1 1 1 - 1 1 1 1 1 - 1 1 1 1 1 - 1 1 1 1 1 - 1 1 1 1 1 - 1 1 1 1 1 - 1 1 1 1 1 - 1 1 1 1 1 - 1 1 1 1 1 - 1 1 1 1 1 - 1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 
-1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 
-1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -PREHOOK: query: select b, `c/b/o_t1`.c, `//cbo_t2`.c_int, `cbo_/t3////`.c_int from (select key as a, c_int as b, `c/b/o_t1`.c_float as c from `c/b/o_t1`) `c/b/o_t1` join `//cbo_t2` on `c/b/o_t1`.a=`//cbo_t2`.key join `cbo_/t3////` on `c/b/o_t1`.a=`cbo_/t3////`.key -PREHOOK: type: QUERY -PREHOOK: Input: default@//cbo_t2 -PREHOOK: Input: default@//cbo_t2@dt=2014 -PREHOOK: Input: default@c/b/o_t1 -PREHOOK: Input: default@c/b/o_t1@dt=2014 -PREHOOK: Input: default@cbo_/t3//// -#### A masked pattern was here #### -POSTHOOK: query: select b, `c/b/o_t1`.c, `//cbo_t2`.c_int, `cbo_/t3////`.c_int from (select key as a, c_int as b, `c/b/o_t1`.c_float as c from `c/b/o_t1`) `c/b/o_t1` join `//cbo_t2` on `c/b/o_t1`.a=`//cbo_t2`.key join `cbo_/t3////` on `c/b/o_t1`.a=`cbo_/t3////`.key -POSTHOOK: type: QUERY -POSTHOOK: Input: default@//cbo_t2 -POSTHOOK: Input: default@//cbo_t2@dt=2014 -POSTHOOK: Input: default@c/b/o_t1 -POSTHOOK: Input: default@c/b/o_t1@dt=2014 -POSTHOOK: Input: default@cbo_/t3//// -#### A masked pattern was here #### -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 
-1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -PREHOOK: query: select `cbo_/t3////`.c_int, b, 
`//cbo_t2`.c_int, `c/b/o_t1`.c from (select key as a, c_int as b, `c/b/o_t1`.c_float as c from `c/b/o_t1`) `c/b/o_t1` join `//cbo_t2` on `c/b/o_t1`.a=`//cbo_t2`.key join `cbo_/t3////` on `c/b/o_t1`.a=`cbo_/t3////`.key -PREHOOK: type: QUERY -PREHOOK: Input: default@//cbo_t2 -PREHOOK: Input: default@//cbo_t2@dt=2014 -PREHOOK: Input: default@c/b/o_t1 -PREHOOK: Input: default@c/b/o_t1@dt=2014 -PREHOOK: Input: default@cbo_/t3//// -#### A masked pattern was here #### -POSTHOOK: query: select `cbo_/t3////`.c_int, b, `//cbo_t2`.c_int, `c/b/o_t1`.c from (select key as a, c_int as b, `c/b/o_t1`.c_float as c from `c/b/o_t1`) `c/b/o_t1` join `//cbo_t2` on `c/b/o_t1`.a=`//cbo_t2`.key join `cbo_/t3////` on `c/b/o_t1`.a=`cbo_/t3////`.key -POSTHOOK: type: QUERY -POSTHOOK: Input: default@//cbo_t2 -POSTHOOK: Input: default@//cbo_t2@dt=2014 -POSTHOOK: Input: default@c/b/o_t1 -POSTHOOK: Input: default@c/b/o_t1@dt=2014 -POSTHOOK: Input: default@cbo_/t3//// -#### A masked pattern was here #### -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 
1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -1 1 1 1.0 -PREHOOK: query: select b, `c/b/o_t1`.c, `//cbo_t2`.p, q, `cbo_/t3////`.c_int from (select key as a, c_int as b, `c/b/o_t1`.c_float as c from `c/b/o_t1`) `c/b/o_t1` left outer join (select `//cbo_t2`.key as p, `//cbo_t2`.c_int as q, c_float as r from `//cbo_t2`) `//cbo_t2` on `c/b/o_t1`.a=p join `cbo_/t3////` on 
`c/b/o_t1`.a=key -PREHOOK: type: QUERY -PREHOOK: Input: default@//cbo_t2 -PREHOOK: Input: default@//cbo_t2@dt=2014 -PREHOOK: Input: default@c/b/o_t1 -PREHOOK: Input: default@c/b/o_t1@dt=2014 -PREHOOK: Input: default@cbo_/t3//// -#### A masked pattern was here #### -POSTHOOK: query: select b, `c/b/o_t1`.c, `//cbo_t2`.p, q, `cbo_/t3////`.c_int from (select key as a, c_int as b, `c/b/o_t1`.c_float as c from `c/b/o_t1`) `c/b/o_t1` left outer join (select `//cbo_t2`.key as p, `//cbo_t2`.c_int as q, c_float as r from `//cbo_t2`) `//cbo_t2` on `c/b/o_t1`.a=p join `cbo_/t3////` on `c/b/o_t1`.a=key -POSTHOOK: type: QUERY -POSTHOOK: Input: default@//cbo_t2 -POSTHOOK: Input: default@//cbo_t2@dt=2014 -POSTHOOK: Input: default@c/b/o_t1 -POSTHOOK: Input: default@c/b/o_t1@dt=2014 -POSTHOOK: Input: default@cbo_/t3//// -#### A masked pattern was here #### -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 
-1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 
1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -PREHOOK: query: select key, `c/b/o_t1`.c_int, `//cbo_t2`.p, q from `c/b/o_t1` join (select `//cbo_t2`.key as p, `//cbo_t2`.c_int as q, c_float as r from `//cbo_t2`) `//cbo_t2` on `c/b/o_t1`.key=p left outer join (select key as a, c_int as b, `cbo_/t3////`.c_float as c from `cbo_/t3////`)`cbo_/t3////` on `c/b/o_t1`.key=a -PREHOOK: type: QUERY -PREHOOK: Input: default@//cbo_t2 -PREHOOK: Input: default@//cbo_t2@dt=2014 -PREHOOK: Input: default@c/b/o_t1 -PREHOOK: Input: default@c/b/o_t1@dt=2014 -PREHOOK: Input: default@cbo_/t3//// -#### A masked pattern was here #### -POSTHOOK: query: select key, `c/b/o_t1`.c_int, `//cbo_t2`.p, q from `c/b/o_t1` join (select `//cbo_t2`.key as p, `//cbo_t2`.c_int as q, c_float as r from `//cbo_t2`) `//cbo_t2` on `c/b/o_t1`.key=p left outer join (select key as a, c_int as b, `cbo_/t3////`.c_float as c from `cbo_/t3////`)`cbo_/t3////` on `c/b/o_t1`.key=a -POSTHOOK: type: QUERY -POSTHOOK: Input: default@//cbo_t2 -POSTHOOK: Input: default@//cbo_t2@dt=2014 -POSTHOOK: Input: default@c/b/o_t1 -POSTHOOK: Input: default@c/b/o_t1@dt=2014 -POSTHOOK: Input: default@cbo_/t3//// -#### A masked pattern was here #### - 1 1 1 1 - 1 1 1 1 - 1 1 1 1 - 1 1 1 1 - 1 1 1 1 - 1 1 1 1 - 1 1 1 1 - 1 1 1 1 - 1 1 1 1 - 1 1 1 1 - 1 1 1 1 - 1 1 1 1 - 1 1 1 1 - 1 1 1 1 - 1 1 1 1 - 1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 
-1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -PREHOOK: query: select b, `c/b/o_t1`.c, `//cbo_t2`.p, q, `cbo_/t3////`.c_int from (select key as a, c_int as b, `c/b/o_t1`.c_float as c from `c/b/o_t1`) `c/b/o_t1` right outer join (select `//cbo_t2`.key as p, `//cbo_t2`.c_int as q, c_float as r from `//cbo_t2`) `//cbo_t2` on `c/b/o_t1`.a=p join `cbo_/t3////` on `c/b/o_t1`.a=key -PREHOOK: type: QUERY -PREHOOK: Input: default@//cbo_t2 -PREHOOK: Input: default@//cbo_t2@dt=2014 -PREHOOK: Input: default@c/b/o_t1 -PREHOOK: Input: default@c/b/o_t1@dt=2014 -PREHOOK: Input: default@cbo_/t3//// -#### A masked pattern was here #### 
-POSTHOOK: query: select b, `c/b/o_t1`.c, `//cbo_t2`.p, q, `cbo_/t3////`.c_int from (select key as a, c_int as b, `c/b/o_t1`.c_float as c from `c/b/o_t1`) `c/b/o_t1` right outer join (select `//cbo_t2`.key as p, `//cbo_t2`.c_int as q, c_float as r from `//cbo_t2`) `//cbo_t2` on `c/b/o_t1`.a=p join `cbo_/t3////` on `c/b/o_t1`.a=key -POSTHOOK: type: QUERY -POSTHOOK: Input: default@//cbo_t2 -POSTHOOK: Input: default@//cbo_t2@dt=2014 -POSTHOOK: Input: default@c/b/o_t1 -POSTHOOK: Input: default@c/b/o_t1@dt=2014 -POSTHOOK: Input: default@cbo_/t3//// -#### A masked pattern was here #### -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 
1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 
1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -PREHOOK: query: select key, `c/b/o_t1`.c_int, `//cbo_t2`.p, q from `c/b/o_t1` join (select `//cbo_t2`.key as p, `//cbo_t2`.c_int as q, c_float as r from `//cbo_t2`) `//cbo_t2` on `c/b/o_t1`.key=p right outer join (select key as a, c_int as b, `cbo_/t3////`.c_float as c from `cbo_/t3////`)`cbo_/t3////` on `c/b/o_t1`.key=a -PREHOOK: type: QUERY -PREHOOK: Input: default@//cbo_t2 -PREHOOK: Input: default@//cbo_t2@dt=2014 -PREHOOK: Input: default@c/b/o_t1 -PREHOOK: Input: default@c/b/o_t1@dt=2014 -PREHOOK: Input: default@cbo_/t3//// -#### A masked pattern was here #### -POSTHOOK: query: select key, `c/b/o_t1`.c_int, `//cbo_t2`.p, q from `c/b/o_t1` join (select `//cbo_t2`.key as p, `//cbo_t2`.c_int as q, c_float as r from `//cbo_t2`) `//cbo_t2` on `c/b/o_t1`.key=p right outer join (select key as a, c_int as b, `cbo_/t3////`.c_float as c from `cbo_/t3////`)`cbo_/t3////` on `c/b/o_t1`.key=a -POSTHOOK: type: QUERY -POSTHOOK: Input: default@//cbo_t2 -POSTHOOK: Input: default@//cbo_t2@dt=2014 -POSTHOOK: Input: default@c/b/o_t1 -POSTHOOK: Input: default@c/b/o_t1@dt=2014 -POSTHOOK: Input: default@cbo_/t3//// -#### A masked pattern was here #### - 1 1 1 1 - 1 1 1 1 - 1 1 1 1 - 1 1 1 1 - 1 1 1 1 - 1 1 1 1 - 1 1 1 1 - 1 1 1 1 - 1 1 1 1 - 1 1 1 1 - 1 1 1 1 - 1 1 1 1 - 1 1 1 1 - 1 1 1 1 - 1 1 1 1 - 1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 
1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -NULL NULL NULL NULL -NULL NULL NULL NULL -NULL NULL NULL NULL -NULL NULL NULL NULL -NULL NULL NULL NULL -NULL NULL NULL NULL -NULL NULL NULL NULL -NULL NULL NULL NULL -PREHOOK: query: select b, `c/b/o_t1`.c, `//cbo_t2`.p, q, `cbo_/t3////`.c_int from (select key as a, c_int as b, `c/b/o_t1`.c_float as c from `c/b/o_t1`) `c/b/o_t1` full outer join (select `//cbo_t2`.key as p, `//cbo_t2`.c_int as q, c_float as r from `//cbo_t2`) `//cbo_t2` on `c/b/o_t1`.a=p join `cbo_/t3////` on `c/b/o_t1`.a=key -PREHOOK: type: QUERY -PREHOOK: Input: default@//cbo_t2 -PREHOOK: Input: default@//cbo_t2@dt=2014 -PREHOOK: Input: default@c/b/o_t1 -PREHOOK: Input: default@c/b/o_t1@dt=2014 -PREHOOK: Input: default@cbo_/t3//// -#### A masked pattern was here #### -POSTHOOK: query: select b, `c/b/o_t1`.c, `//cbo_t2`.p, q, `cbo_/t3////`.c_int from (select key as a, 
c_int as b, `c/b/o_t1`.c_float as c from `c/b/o_t1`) `c/b/o_t1` full outer join (select `//cbo_t2`.key as p, `//cbo_t2`.c_int as q, c_float as r from `//cbo_t2`) `//cbo_t2` on `c/b/o_t1`.a=p join `cbo_/t3////` on `c/b/o_t1`.a=key -POSTHOOK: type: QUERY -POSTHOOK: Input: default@//cbo_t2 -POSTHOOK: Input: default@//cbo_t2@dt=2014 -POSTHOOK: Input: default@c/b/o_t1 -POSTHOOK: Input: default@c/b/o_t1@dt=2014 -POSTHOOK: Input: default@cbo_/t3//// -#### A masked pattern was here #### -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 
1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 
1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -PREHOOK: query: select key, `c/b/o_t1`.c_int, `//cbo_t2`.p, q from `c/b/o_t1` join (select `//cbo_t2`.key as p, `//cbo_t2`.c_int as q, c_float as r from `//cbo_t2`) `//cbo_t2` on `c/b/o_t1`.key=p full outer join (select key as a, c_int as b, `cbo_/t3////`.c_float as c from `cbo_/t3////`)`cbo_/t3////` on `c/b/o_t1`.key=a -PREHOOK: type: QUERY -PREHOOK: Input: default@//cbo_t2 -PREHOOK: Input: default@//cbo_t2@dt=2014 -PREHOOK: Input: default@c/b/o_t1 -PREHOOK: Input: default@c/b/o_t1@dt=2014 -PREHOOK: Input: default@cbo_/t3//// -#### A masked pattern was here #### -POSTHOOK: query: select key, `c/b/o_t1`.c_int, `//cbo_t2`.p, q from `c/b/o_t1` join (select `//cbo_t2`.key as p, `//cbo_t2`.c_int as q, c_float as r from `//cbo_t2`) `//cbo_t2` on `c/b/o_t1`.key=p full outer join (select key as a, c_int as b, `cbo_/t3////`.c_float as c from `cbo_/t3////`)`cbo_/t3////` on `c/b/o_t1`.key=a -POSTHOOK: type: QUERY -POSTHOOK: Input: default@//cbo_t2 -POSTHOOK: Input: default@//cbo_t2@dt=2014 -POSTHOOK: Input: default@c/b/o_t1 -POSTHOOK: Input: default@c/b/o_t1@dt=2014 -POSTHOOK: Input: default@cbo_/t3//// -#### A masked pattern was here #### - 1 1 1 1 - 1 1 1 1 - 1 1 1 1 - 1 1 1 1 - 1 1 1 1 - 1 1 1 1 - 1 1 1 1 - 1 1 1 1 - 1 1 1 1 - 1 1 1 1 - 1 1 1 1 - 1 1 1 1 - 1 1 1 1 - 1 1 1 1 - 1 1 1 1 - 1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 
-1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -NULL NULL NULL NULL -NULL NULL NULL NULL -NULL NULL NULL NULL -NULL NULL NULL NULL -NULL NULL NULL NULL -NULL NULL NULL NULL -NULL NULL NULL NULL -NULL NULL NULL NULL -PREHOOK: query: select `c/b/o_t1`.c_int, `//cbo_t2`.c_int from `c/b/o_t1` join `//cbo_t2` on `c/b/o_t1`.key=`//cbo_t2`.key where (`c/b/o_t1`.c_int + `//cbo_t2`.c_int == 2) and (`c/b/o_t1`.c_int > 0 or `//cbo_t2`.c_float >= 0) -PREHOOK: type: QUERY -PREHOOK: Input: default@//cbo_t2 -PREHOOK: Input: default@//cbo_t2@dt=2014 -PREHOOK: Input: default@c/b/o_t1 -PREHOOK: Input: default@c/b/o_t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: select `c/b/o_t1`.c_int, `//cbo_t2`.c_int from `c/b/o_t1` join `//cbo_t2` on `c/b/o_t1`.key=`//cbo_t2`.key where (`c/b/o_t1`.c_int + `//cbo_t2`.c_int == 2) and (`c/b/o_t1`.c_int > 0 or `//cbo_t2`.c_float >= 0) -POSTHOOK: type: QUERY -POSTHOOK: Input: default@//cbo_t2 -POSTHOOK: Input: default@//cbo_t2@dt=2014 -POSTHOOK: Input: 
default@c/b/o_t1 -POSTHOOK: Input: default@c/b/o_t1@dt=2014 -#### A masked pattern was here #### -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -PREHOOK: query: select `c/b/o_t1`.c_int, `//cbo_t2`.c_int from `c/b/o_t1` left outer join `//cbo_t2` on `c/b/o_t1`.key=`//cbo_t2`.key where (`c/b/o_t1`.c_int + `//cbo_t2`.c_int == 2) and (`c/b/o_t1`.c_int > 0 or `//cbo_t2`.c_float >= 0) -PREHOOK: type: QUERY -PREHOOK: Input: default@//cbo_t2 -PREHOOK: Input: default@//cbo_t2@dt=2014 -PREHOOK: Input: default@c/b/o_t1 -PREHOOK: Input: default@c/b/o_t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: select `c/b/o_t1`.c_int, `//cbo_t2`.c_int from `c/b/o_t1` left outer join `//cbo_t2` on `c/b/o_t1`.key=`//cbo_t2`.key where (`c/b/o_t1`.c_int + `//cbo_t2`.c_int == 2) and (`c/b/o_t1`.c_int > 0 or `//cbo_t2`.c_float >= 0) -POSTHOOK: type: QUERY -POSTHOOK: Input: default@//cbo_t2 -POSTHOOK: Input: default@//cbo_t2@dt=2014 -POSTHOOK: Input: default@c/b/o_t1 -POSTHOOK: Input: default@c/b/o_t1@dt=2014 -#### A masked pattern was here #### -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -PREHOOK: query: select `c/b/o_t1`.c_int, `//cbo_t2`.c_int from `c/b/o_t1` right outer join `//cbo_t2` on `c/b/o_t1`.key=`//cbo_t2`.key where (`c/b/o_t1`.c_int + `//cbo_t2`.c_int == 2) and (`c/b/o_t1`.c_int > 0 or `//cbo_t2`.c_float >= 0) -PREHOOK: type: QUERY -PREHOOK: Input: default@//cbo_t2 -PREHOOK: Input: default@//cbo_t2@dt=2014 -PREHOOK: Input: default@c/b/o_t1 -PREHOOK: Input: default@c/b/o_t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: select `c/b/o_t1`.c_int, `//cbo_t2`.c_int from `c/b/o_t1` right outer join `//cbo_t2` on `c/b/o_t1`.key=`//cbo_t2`.key where (`c/b/o_t1`.c_int + `//cbo_t2`.c_int == 2) and (`c/b/o_t1`.c_int > 0 or `//cbo_t2`.c_float >= 0) -POSTHOOK: type: QUERY -POSTHOOK: Input: default@//cbo_t2 -POSTHOOK: Input: default@//cbo_t2@dt=2014 -POSTHOOK: Input: default@c/b/o_t1 -POSTHOOK: Input: default@c/b/o_t1@dt=2014 -#### A masked pattern was here #### -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -PREHOOK: query: select `c/b/o_t1`.c_int, `//cbo_t2`.c_int from `c/b/o_t1` full outer join `//cbo_t2` on `c/b/o_t1`.key=`//cbo_t2`.key where (`c/b/o_t1`.c_int + `//cbo_t2`.c_int == 2) and (`c/b/o_t1`.c_int > 
0 or `//cbo_t2`.c_float >= 0) -PREHOOK: type: QUERY -PREHOOK: Input: default@//cbo_t2 -PREHOOK: Input: default@//cbo_t2@dt=2014 -PREHOOK: Input: default@c/b/o_t1 -PREHOOK: Input: default@c/b/o_t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: select `c/b/o_t1`.c_int, `//cbo_t2`.c_int from `c/b/o_t1` full outer join `//cbo_t2` on `c/b/o_t1`.key=`//cbo_t2`.key where (`c/b/o_t1`.c_int + `//cbo_t2`.c_int == 2) and (`c/b/o_t1`.c_int > 0 or `//cbo_t2`.c_float >= 0) -POSTHOOK: type: QUERY -POSTHOOK: Input: default@//cbo_t2 -POSTHOOK: Input: default@//cbo_t2@dt=2014 -POSTHOOK: Input: default@c/b/o_t1 -POSTHOOK: Input: default@c/b/o_t1@dt=2014 -#### A masked pattern was here #### -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -PREHOOK: query: select b, `c/b/o_t1`.c, `//cbo_t2`.p, q, `cbo_/t3////`.c_int from (select key as a, c_int as b, `c/b/o_t1`.c_float as c from `c/b/o_t1` where (`c/b/o_t1`.c_int + 1 == 2) and (`c/b/o_t1`.c_int > 0 or `c/b/o_t1`.c_float >= 0)) `c/b/o_t1` join (select `//cbo_t2`.key as p, `//cbo_t2`.c_int as q, c_float as r from `//cbo_t2` where (`//cbo_t2`.c_int + 1 == 2) and (`//cbo_t2`.c_int > 0 or `//cbo_t2`.c_float >= 0)) `//cbo_t2` on `c/b/o_t1`.a=p join `cbo_/t3////` on `c/b/o_t1`.a=key where (b + `//cbo_t2`.q == 2) and (b > 0 or `//cbo_t2`.q >= 0) -PREHOOK: type: QUERY -PREHOOK: Input: default@//cbo_t2 -PREHOOK: Input: default@//cbo_t2@dt=2014 -PREHOOK: Input: default@c/b/o_t1 -PREHOOK: Input: default@c/b/o_t1@dt=2014 -PREHOOK: Input: default@cbo_/t3//// -#### A masked pattern was here #### -POSTHOOK: query: select b, `c/b/o_t1`.c, `//cbo_t2`.p, q, `cbo_/t3////`.c_int from (select key as a, c_int as b, `c/b/o_t1`.c_float as c from `c/b/o_t1` where (`c/b/o_t1`.c_int + 1 == 2) and (`c/b/o_t1`.c_int > 0 or `c/b/o_t1`.c_float >= 0)) `c/b/o_t1` join (select `//cbo_t2`.key as p, `//cbo_t2`.c_int as q, c_float as r from `//cbo_t2` where (`//cbo_t2`.c_int + 1 == 2) and (`//cbo_t2`.c_int > 0 or `//cbo_t2`.c_float >= 0)) `//cbo_t2` on `c/b/o_t1`.a=p join `cbo_/t3////` on `c/b/o_t1`.a=key where (b + `//cbo_t2`.q == 2) and (b > 0 or `//cbo_t2`.q >= 0) -POSTHOOK: type: QUERY -POSTHOOK: Input: default@//cbo_t2 -POSTHOOK: Input: default@//cbo_t2@dt=2014 -POSTHOOK: Input: default@c/b/o_t1 -POSTHOOK: Input: default@c/b/o_t1@dt=2014 -POSTHOOK: Input: default@cbo_/t3//// -#### A masked pattern was here #### -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 
1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 
1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -PREHOOK: query: select q, b, `//cbo_t2`.p, `c/b/o_t1`.c, `cbo_/t3////`.c_int from (select key as a, c_int as b, `c/b/o_t1`.c_float as c from `c/b/o_t1` where (`c/b/o_t1`.c_int + 1 == 2) and (`c/b/o_t1`.c_int > 0 or `c/b/o_t1`.c_float >= 0)) `c/b/o_t1` left outer join (select `//cbo_t2`.key as p, `//cbo_t2`.c_int as q, c_float as r from `//cbo_t2` where (`//cbo_t2`.c_int + 1 == 2) and (`//cbo_t2`.c_int > 0 or `//cbo_t2`.c_float >= 0)) `//cbo_t2` on `c/b/o_t1`.a=p join `cbo_/t3////` on `c/b/o_t1`.a=key where (b + `//cbo_t2`.q == 2) and (b > 0 or c_int >= 0) -PREHOOK: type: QUERY -PREHOOK: Input: default@//cbo_t2 -PREHOOK: Input: default@//cbo_t2@dt=2014 -PREHOOK: Input: default@c/b/o_t1 -PREHOOK: Input: default@c/b/o_t1@dt=2014 -PREHOOK: Input: default@cbo_/t3//// -#### A masked pattern was here #### -POSTHOOK: query: select q, b, `//cbo_t2`.p, `c/b/o_t1`.c, `cbo_/t3////`.c_int from (select key as a, c_int as b, `c/b/o_t1`.c_float as c from `c/b/o_t1` where (`c/b/o_t1`.c_int + 1 == 2) and 
(`c/b/o_t1`.c_int > 0 or `c/b/o_t1`.c_float >= 0)) `c/b/o_t1` left outer join (select `//cbo_t2`.key as p, `//cbo_t2`.c_int as q, c_float as r from `//cbo_t2` where (`//cbo_t2`.c_int + 1 == 2) and (`//cbo_t2`.c_int > 0 or `//cbo_t2`.c_float >= 0)) `//cbo_t2` on `c/b/o_t1`.a=p join `cbo_/t3////` on `c/b/o_t1`.a=key where (b + `//cbo_t2`.q == 2) and (b > 0 or c_int >= 0) -POSTHOOK: type: QUERY -POSTHOOK: Input: default@//cbo_t2 -POSTHOOK: Input: default@//cbo_t2@dt=2014 -POSTHOOK: Input: default@c/b/o_t1 -POSTHOOK: Input: default@c/b/o_t1@dt=2014 -POSTHOOK: Input: default@cbo_/t3//// -#### A masked pattern was here #### -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 
1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 
1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -PREHOOK: query: select q, b, `//cbo_t2`.p, `c/b/o_t1`.c, `cbo_/t3////`.c_int from (select key as a, c_int as b, `c/b/o_t1`.c_float as c from `c/b/o_t1` where (`c/b/o_t1`.c_int + 1 == 2) and (`c/b/o_t1`.c_int > 0 or `c/b/o_t1`.c_float >= 0)) `c/b/o_t1` right outer join (select `//cbo_t2`.key as p, `//cbo_t2`.c_int as q, c_float as r from `//cbo_t2` where (`//cbo_t2`.c_int + 1 == 2) and (`//cbo_t2`.c_int > 0 or `//cbo_t2`.c_float >= 0)) `//cbo_t2` on `c/b/o_t1`.a=p join `cbo_/t3////` on `c/b/o_t1`.a=key where (b + `//cbo_t2`.q == 2) and (b > 0 or c_int >= 0) -PREHOOK: type: QUERY -PREHOOK: Input: default@//cbo_t2 -PREHOOK: Input: default@//cbo_t2@dt=2014 -PREHOOK: Input: default@c/b/o_t1 -PREHOOK: Input: default@c/b/o_t1@dt=2014 -PREHOOK: Input: default@cbo_/t3//// -#### A masked pattern was here #### -POSTHOOK: query: select q, b, `//cbo_t2`.p, `c/b/o_t1`.c, `cbo_/t3////`.c_int from (select key as a, c_int as b, `c/b/o_t1`.c_float as c from `c/b/o_t1` where (`c/b/o_t1`.c_int + 1 == 2) and (`c/b/o_t1`.c_int > 0 or `c/b/o_t1`.c_float >= 0)) `c/b/o_t1` right outer join (select `//cbo_t2`.key as p, `//cbo_t2`.c_int as q, c_float as r from `//cbo_t2` where (`//cbo_t2`.c_int + 1 == 2) and (`//cbo_t2`.c_int > 0 or `//cbo_t2`.c_float >= 0)) `//cbo_t2` on `c/b/o_t1`.a=p join `cbo_/t3////` on `c/b/o_t1`.a=key where (b + `//cbo_t2`.q == 2) and (b > 0 or c_int >= 0) -POSTHOOK: type: QUERY -POSTHOOK: Input: default@//cbo_t2 -POSTHOOK: Input: default@//cbo_t2@dt=2014 -POSTHOOK: Input: default@c/b/o_t1 -POSTHOOK: Input: default@c/b/o_t1@dt=2014 -POSTHOOK: Input: default@cbo_/t3//// -#### A masked pattern was here #### -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 
1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 
-1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -PREHOOK: query: select q, b, `//cbo_t2`.p, `c/b/o_t1`.c, `cbo_/t3////`.c_int from (select key as a, c_int as b, `c/b/o_t1`.c_float as c from `c/b/o_t1` where (`c/b/o_t1`.c_int + 1 == 2) and (`c/b/o_t1`.c_int > 0 or `c/b/o_t1`.c_float >= 0)) `c/b/o_t1` full outer join (select `//cbo_t2`.key as p, `//cbo_t2`.c_int as q, c_float as r from `//cbo_t2` where (`//cbo_t2`.c_int + 1 == 2) and (`//cbo_t2`.c_int > 0 or `//cbo_t2`.c_float >= 0)) `//cbo_t2` on `c/b/o_t1`.a=p join `cbo_/t3////` on `c/b/o_t1`.a=key where (b + `//cbo_t2`.q == 2) and (b > 0 or c_int >= 0) -PREHOOK: type: QUERY -PREHOOK: Input: default@//cbo_t2 -PREHOOK: Input: default@//cbo_t2@dt=2014 -PREHOOK: Input: default@c/b/o_t1 -PREHOOK: Input: default@c/b/o_t1@dt=2014 -PREHOOK: Input: default@cbo_/t3//// -#### A masked pattern was here #### -POSTHOOK: query: select q, b, `//cbo_t2`.p, `c/b/o_t1`.c, `cbo_/t3////`.c_int from (select key as a, c_int as b, `c/b/o_t1`.c_float as c from `c/b/o_t1` where (`c/b/o_t1`.c_int + 1 == 2) and (`c/b/o_t1`.c_int > 0 or `c/b/o_t1`.c_float >= 0)) `c/b/o_t1` full outer join (select `//cbo_t2`.key as p, `//cbo_t2`.c_int as q, c_float as r from `//cbo_t2` where (`//cbo_t2`.c_int + 1 == 2) and (`//cbo_t2`.c_int > 0 or `//cbo_t2`.c_float >= 0)) `//cbo_t2` on `c/b/o_t1`.a=p join `cbo_/t3////` on `c/b/o_t1`.a=key where (b + `//cbo_t2`.q == 2) and (b > 0 or c_int >= 0) -POSTHOOK: type: QUERY -POSTHOOK: Input: default@//cbo_t2 -POSTHOOK: Input: default@//cbo_t2@dt=2014 -POSTHOOK: Input: default@c/b/o_t1 -POSTHOOK: Input: default@c/b/o_t1@dt=2014 -POSTHOOK: Input: default@cbo_/t3//// -#### A masked pattern was here #### -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 
-1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 
1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -PREHOOK: query: select * from (select q, b, `//cbo_t2`.p, `c/b/o_t1`.c, `cbo_/t3////`.c_int from (select key as a, c_int as b, `c/b/o_t1`.c_float as c from `c/b/o_t1` where (`c/b/o_t1`.c_int + 1 == 2) and (`c/b/o_t1`.c_int > 0 or `c/b/o_t1`.c_float >= 0)) `c/b/o_t1` full outer join (select `//cbo_t2`.key as p, `//cbo_t2`.c_int as q, c_float as r from `//cbo_t2` where (`//cbo_t2`.c_int + 1 == 2) 
and (`//cbo_t2`.c_int > 0 or `//cbo_t2`.c_float >= 0)) `//cbo_t2` on `c/b/o_t1`.a=p join `cbo_/t3////` on `c/b/o_t1`.a=key where (b + `//cbo_t2`.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0) -PREHOOK: type: QUERY -PREHOOK: Input: default@//cbo_t2 -PREHOOK: Input: default@//cbo_t2@dt=2014 -PREHOOK: Input: default@c/b/o_t1 -PREHOOK: Input: default@c/b/o_t1@dt=2014 -PREHOOK: Input: default@cbo_/t3//// -#### A masked pattern was here #### -POSTHOOK: query: select * from (select q, b, `//cbo_t2`.p, `c/b/o_t1`.c, `cbo_/t3////`.c_int from (select key as a, c_int as b, `c/b/o_t1`.c_float as c from `c/b/o_t1` where (`c/b/o_t1`.c_int + 1 == 2) and (`c/b/o_t1`.c_int > 0 or `c/b/o_t1`.c_float >= 0)) `c/b/o_t1` full outer join (select `//cbo_t2`.key as p, `//cbo_t2`.c_int as q, c_float as r from `//cbo_t2` where (`//cbo_t2`.c_int + 1 == 2) and (`//cbo_t2`.c_int > 0 or `//cbo_t2`.c_float >= 0)) `//cbo_t2` on `c/b/o_t1`.a=p join `cbo_/t3////` on `c/b/o_t1`.a=key where (b + `//cbo_t2`.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0) -POSTHOOK: type: QUERY -POSTHOOK: Input: default@//cbo_t2 -POSTHOOK: Input: default@//cbo_t2@dt=2014 -POSTHOOK: Input: default@c/b/o_t1 -POSTHOOK: Input: default@c/b/o_t1@dt=2014 -POSTHOOK: Input: default@cbo_/t3//// -#### A masked pattern was here #### -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 
-1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 
1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -PREHOOK: query: select * from (select q, b, `//cbo_t2`.p, `c/b/o_t1`.c, `cbo_/t3////`.c_int from (select key as a, c_int as b, `c/b/o_t1`.c_float as c from `c/b/o_t1` where (`c/b/o_t1`.c_int + 1 == 2) and (`c/b/o_t1`.c_int > 0 or `c/b/o_t1`.c_float >= 0)) `c/b/o_t1` left outer join (select `//cbo_t2`.key as p, `//cbo_t2`.c_int as q, c_float as r from `//cbo_t2` where (`//cbo_t2`.c_int + 1 == 2) and (`//cbo_t2`.c_int > 0 or `//cbo_t2`.c_float >= 0)) `//cbo_t2` on `c/b/o_t1`.a=p left outer join `cbo_/t3////` on `c/b/o_t1`.a=key where (b + `//cbo_t2`.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0) -PREHOOK: type: QUERY -PREHOOK: Input: default@//cbo_t2 -PREHOOK: Input: default@//cbo_t2@dt=2014 -PREHOOK: Input: default@c/b/o_t1 -PREHOOK: Input: default@c/b/o_t1@dt=2014 -PREHOOK: Input: default@cbo_/t3//// -#### A masked pattern was here #### -POSTHOOK: query: select * from (select q, b, `//cbo_t2`.p, `c/b/o_t1`.c, `cbo_/t3////`.c_int from (select key as a, c_int as b, `c/b/o_t1`.c_float as c from `c/b/o_t1` where (`c/b/o_t1`.c_int + 1 == 2) and (`c/b/o_t1`.c_int > 0 or `c/b/o_t1`.c_float >= 0)) `c/b/o_t1` left outer join (select `//cbo_t2`.key as p, `//cbo_t2`.c_int as q, c_float as r from `//cbo_t2` where (`//cbo_t2`.c_int + 1 == 2) and (`//cbo_t2`.c_int > 0 or `//cbo_t2`.c_float >= 0)) `//cbo_t2` on `c/b/o_t1`.a=p left outer join `cbo_/t3////` on `c/b/o_t1`.a=key where (b + `//cbo_t2`.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0) -POSTHOOK: type: QUERY -POSTHOOK: Input: default@//cbo_t2 -POSTHOOK: Input: default@//cbo_t2@dt=2014 -POSTHOOK: Input: default@c/b/o_t1 -POSTHOOK: Input: default@c/b/o_t1@dt=2014 -POSTHOOK: Input: default@cbo_/t3//// -#### A masked pattern was here #### -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 
-1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 
1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -PREHOOK: query: select * from (select q, b, `//cbo_t2`.p, `c/b/o_t1`.c, `cbo_/t3////`.c_int from (select key as a, c_int as b, `c/b/o_t1`.c_float as c from `c/b/o_t1` where (`c/b/o_t1`.c_int + 1 == 2) and (`c/b/o_t1`.c_int > 0 or `c/b/o_t1`.c_float >= 0)) `c/b/o_t1` left outer join (select `//cbo_t2`.key as p, `//cbo_t2`.c_int as q, c_float as r from `//cbo_t2` where (`//cbo_t2`.c_int + 1 == 2) and (`//cbo_t2`.c_int > 0 or `//cbo_t2`.c_float >= 0)) `//cbo_t2` on `c/b/o_t1`.a=p right outer join `cbo_/t3////` on `c/b/o_t1`.a=key where (b + `//cbo_t2`.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0) -PREHOOK: type: QUERY -PREHOOK: Input: default@//cbo_t2 -PREHOOK: Input: default@//cbo_t2@dt=2014 -PREHOOK: Input: default@c/b/o_t1 -PREHOOK: Input: default@c/b/o_t1@dt=2014 -PREHOOK: Input: default@cbo_/t3//// -#### A masked pattern was here #### -POSTHOOK: query: 
select * from (select q, b, `//cbo_t2`.p, `c/b/o_t1`.c, `cbo_/t3////`.c_int from (select key as a, c_int as b, `c/b/o_t1`.c_float as c from `c/b/o_t1` where (`c/b/o_t1`.c_int + 1 == 2) and (`c/b/o_t1`.c_int > 0 or `c/b/o_t1`.c_float >= 0)) `c/b/o_t1` left outer join (select `//cbo_t2`.key as p, `//cbo_t2`.c_int as q, c_float as r from `//cbo_t2` where (`//cbo_t2`.c_int + 1 == 2) and (`//cbo_t2`.c_int > 0 or `//cbo_t2`.c_float >= 0)) `//cbo_t2` on `c/b/o_t1`.a=p right outer join `cbo_/t3////` on `c/b/o_t1`.a=key where (b + `//cbo_t2`.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0) -POSTHOOK: type: QUERY -POSTHOOK: Input: default@//cbo_t2 -POSTHOOK: Input: default@//cbo_t2@dt=2014 -POSTHOOK: Input: default@c/b/o_t1 -POSTHOOK: Input: default@c/b/o_t1@dt=2014 -POSTHOOK: Input: default@cbo_/t3//// -#### A masked pattern was here #### -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 
-1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 
1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -PREHOOK: query: select * from (select q, b, `//cbo_t2`.p, `c/b/o_t1`.c, `cbo_/t3////`.c_int from (select key as a, c_int as b, `c/b/o_t1`.c_float as c from `c/b/o_t1` where (`c/b/o_t1`.c_int + 1 == 2) and (`c/b/o_t1`.c_int > 0 or `c/b/o_t1`.c_float >= 0)) `c/b/o_t1` left outer join (select `//cbo_t2`.key as p, `//cbo_t2`.c_int as q, c_float as r from `//cbo_t2` where (`//cbo_t2`.c_int + 1 == 2) and (`//cbo_t2`.c_int > 0 or `//cbo_t2`.c_float >= 0)) `//cbo_t2` on `c/b/o_t1`.a=p full outer join `cbo_/t3////` on `c/b/o_t1`.a=key where (b + `//cbo_t2`.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0) -PREHOOK: type: QUERY -PREHOOK: Input: default@//cbo_t2 -PREHOOK: Input: default@//cbo_t2@dt=2014 -PREHOOK: Input: default@c/b/o_t1 -PREHOOK: Input: default@c/b/o_t1@dt=2014 -PREHOOK: Input: default@cbo_/t3//// -#### A masked pattern was here #### -POSTHOOK: query: select * from (select q, b, `//cbo_t2`.p, `c/b/o_t1`.c, `cbo_/t3////`.c_int from (select key as a, c_int as b, `c/b/o_t1`.c_float as c from `c/b/o_t1` where (`c/b/o_t1`.c_int + 1 == 2) and (`c/b/o_t1`.c_int > 0 or `c/b/o_t1`.c_float >= 0)) `c/b/o_t1` left outer join (select `//cbo_t2`.key as p, `//cbo_t2`.c_int as q, c_float as r from `//cbo_t2` where (`//cbo_t2`.c_int + 1 == 2) and (`//cbo_t2`.c_int > 0 or `//cbo_t2`.c_float >= 0)) `//cbo_t2` on `c/b/o_t1`.a=p full outer join `cbo_/t3////` on `c/b/o_t1`.a=key where (b + `//cbo_t2`.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0) -POSTHOOK: type: QUERY -POSTHOOK: Input: default@//cbo_t2 -POSTHOOK: Input: default@//cbo_t2@dt=2014 -POSTHOOK: Input: default@c/b/o_t1 -POSTHOOK: Input: default@c/b/o_t1@dt=2014 -POSTHOOK: Input: default@cbo_/t3//// -#### A masked pattern was here #### -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 
-1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 
1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -PREHOOK: query: select * from (select q, b, `//cbo_t2`.p, `c/b/o_t1`.c, `cbo_/t3////`.c_int from (select key as a, c_int as b, `c/b/o_t1`.c_float as c from `c/b/o_t1` where (`c/b/o_t1`.c_int + 1 == 2) and (`c/b/o_t1`.c_int > 0 or `c/b/o_t1`.c_float >= 0)) `c/b/o_t1` right outer join (select `//cbo_t2`.key as p, `//cbo_t2`.c_int as q, c_float as r from `//cbo_t2` where (`//cbo_t2`.c_int + 1 == 2) and (`//cbo_t2`.c_int > 0 or `//cbo_t2`.c_float >= 0)) `//cbo_t2` on `c/b/o_t1`.a=p right outer join `cbo_/t3////` on `c/b/o_t1`.a=key where (b + `//cbo_t2`.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0) -PREHOOK: type: QUERY -PREHOOK: Input: default@//cbo_t2 -PREHOOK: Input: default@//cbo_t2@dt=2014 -PREHOOK: Input: default@c/b/o_t1 -PREHOOK: Input: default@c/b/o_t1@dt=2014 -PREHOOK: Input: default@cbo_/t3//// -#### A masked pattern was here #### -POSTHOOK: query: select * from (select q, b, `//cbo_t2`.p, `c/b/o_t1`.c, `cbo_/t3////`.c_int from (select key as a, c_int as b, `c/b/o_t1`.c_float as c from `c/b/o_t1` where (`c/b/o_t1`.c_int + 1 == 2) and (`c/b/o_t1`.c_int > 0 or `c/b/o_t1`.c_float >= 0)) `c/b/o_t1` right outer join (select `//cbo_t2`.key as p, `//cbo_t2`.c_int as q, c_float as r from `//cbo_t2` where (`//cbo_t2`.c_int + 1 == 2) and (`//cbo_t2`.c_int > 0 or `//cbo_t2`.c_float >= 0)) `//cbo_t2` on `c/b/o_t1`.a=p right outer 
join `cbo_/t3////` on `c/b/o_t1`.a=key where (b + `//cbo_t2`.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0) -POSTHOOK: type: QUERY -POSTHOOK: Input: default@//cbo_t2 -POSTHOOK: Input: default@//cbo_t2@dt=2014 -POSTHOOK: Input: default@c/b/o_t1 -POSTHOOK: Input: default@c/b/o_t1@dt=2014 -POSTHOOK: Input: default@cbo_/t3//// -#### A masked pattern was here #### -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 
1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 
1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -PREHOOK: query: select * from (select q, b, `//cbo_t2`.p, `c/b/o_t1`.c, `cbo_/t3////`.c_int from (select key as a, c_int as b, `c/b/o_t1`.c_float as c from `c/b/o_t1` where (`c/b/o_t1`.c_int + 1 == 2) and (`c/b/o_t1`.c_int > 0 or `c/b/o_t1`.c_float >= 0)) `c/b/o_t1` right outer join (select `//cbo_t2`.key as p, `//cbo_t2`.c_int as q, c_float as r from `//cbo_t2` where (`//cbo_t2`.c_int + 1 == 2) and (`//cbo_t2`.c_int > 0 or `//cbo_t2`.c_float >= 0)) `//cbo_t2` on `c/b/o_t1`.a=p left outer join `cbo_/t3////` on `c/b/o_t1`.a=key where (b + `//cbo_t2`.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0) -PREHOOK: type: QUERY -PREHOOK: Input: default@//cbo_t2 -PREHOOK: Input: default@//cbo_t2@dt=2014 -PREHOOK: Input: default@c/b/o_t1 -PREHOOK: Input: default@c/b/o_t1@dt=2014 -PREHOOK: Input: default@cbo_/t3//// -#### A masked pattern was here #### -POSTHOOK: query: select * from (select q, b, `//cbo_t2`.p, `c/b/o_t1`.c, `cbo_/t3////`.c_int from (select key as a, c_int as b, `c/b/o_t1`.c_float as c from `c/b/o_t1` where (`c/b/o_t1`.c_int + 1 == 2) and (`c/b/o_t1`.c_int > 0 or `c/b/o_t1`.c_float >= 0)) `c/b/o_t1` right outer join (select `//cbo_t2`.key as p, `//cbo_t2`.c_int as q, c_float as r from `//cbo_t2` where (`//cbo_t2`.c_int + 1 == 2) and (`//cbo_t2`.c_int > 0 or `//cbo_t2`.c_float >= 0)) `//cbo_t2` on `c/b/o_t1`.a=p left outer join `cbo_/t3////` on `c/b/o_t1`.a=key where (b + `//cbo_t2`.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0) -POSTHOOK: type: QUERY -POSTHOOK: Input: default@//cbo_t2 -POSTHOOK: Input: default@//cbo_t2@dt=2014 -POSTHOOK: Input: default@c/b/o_t1 -POSTHOOK: Input: default@c/b/o_t1@dt=2014 -POSTHOOK: Input: default@cbo_/t3//// -#### A masked pattern was here #### -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 
1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 
-1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -PREHOOK: query: select * from (select q, b, `//cbo_t2`.p, `c/b/o_t1`.c, `cbo_/t3////`.c_int from (select key as a, c_int as b, `c/b/o_t1`.c_float as c from `c/b/o_t1` where (`c/b/o_t1`.c_int + 1 == 2) and (`c/b/o_t1`.c_int > 0 or `c/b/o_t1`.c_float >= 0)) `c/b/o_t1` right outer join (select `//cbo_t2`.key as p, `//cbo_t2`.c_int as q, c_float as r from `//cbo_t2` where (`//cbo_t2`.c_int + 1 == 2) and (`//cbo_t2`.c_int > 0 or `//cbo_t2`.c_float >= 0)) `//cbo_t2` on `c/b/o_t1`.a=p full outer join `cbo_/t3////` on `c/b/o_t1`.a=key where (b + `//cbo_t2`.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0) -PREHOOK: type: QUERY -PREHOOK: Input: default@//cbo_t2 -PREHOOK: Input: default@//cbo_t2@dt=2014 -PREHOOK: Input: default@c/b/o_t1 -PREHOOK: Input: default@c/b/o_t1@dt=2014 -PREHOOK: Input: default@cbo_/t3//// -#### A masked pattern was here #### -POSTHOOK: query: select * from (select q, b, `//cbo_t2`.p, `c/b/o_t1`.c, `cbo_/t3////`.c_int from (select key as a, c_int as b, `c/b/o_t1`.c_float as c from `c/b/o_t1` where (`c/b/o_t1`.c_int + 1 == 2) and (`c/b/o_t1`.c_int > 0 or `c/b/o_t1`.c_float >= 0)) `c/b/o_t1` right outer join (select `//cbo_t2`.key as p, `//cbo_t2`.c_int as q, c_float as r from `//cbo_t2` where (`//cbo_t2`.c_int + 1 == 2) and (`//cbo_t2`.c_int > 0 or `//cbo_t2`.c_float >= 0)) `//cbo_t2` on `c/b/o_t1`.a=p full outer join `cbo_/t3////` on `c/b/o_t1`.a=key where (b + `//cbo_t2`.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0) -POSTHOOK: type: QUERY -POSTHOOK: Input: default@//cbo_t2 -POSTHOOK: Input: default@//cbo_t2@dt=2014 -POSTHOOK: Input: default@c/b/o_t1 -POSTHOOK: Input: default@c/b/o_t1@dt=2014 -POSTHOOK: Input: default@cbo_/t3//// -#### A masked pattern was here #### -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 
1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 
1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -PREHOOK: query: select * from (select q, b, `//cbo_t2`.p, `c/b/o_t1`.c, `cbo_/t3////`.c_int from (select key as a, c_int as b, `c/b/o_t1`.c_float as c from `c/b/o_t1` where (`c/b/o_t1`.c_int + 1 == 2) and (`c/b/o_t1`.c_int > 0 or `c/b/o_t1`.c_float >= 0)) `c/b/o_t1` full outer join (select `//cbo_t2`.key as p, 
`//cbo_t2`.c_int as q, c_float as r from `//cbo_t2` where (`//cbo_t2`.c_int + 1 == 2) and (`//cbo_t2`.c_int > 0 or `//cbo_t2`.c_float >= 0)) `//cbo_t2` on `c/b/o_t1`.a=p full outer join `cbo_/t3////` on `c/b/o_t1`.a=key where (b + `//cbo_t2`.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0) -PREHOOK: type: QUERY -PREHOOK: Input: default@//cbo_t2 -PREHOOK: Input: default@//cbo_t2@dt=2014 -PREHOOK: Input: default@c/b/o_t1 -PREHOOK: Input: default@c/b/o_t1@dt=2014 -PREHOOK: Input: default@cbo_/t3//// -#### A masked pattern was here #### -POSTHOOK: query: select * from (select q, b, `//cbo_t2`.p, `c/b/o_t1`.c, `cbo_/t3////`.c_int from (select key as a, c_int as b, `c/b/o_t1`.c_float as c from `c/b/o_t1` where (`c/b/o_t1`.c_int + 1 == 2) and (`c/b/o_t1`.c_int > 0 or `c/b/o_t1`.c_float >= 0)) `c/b/o_t1` full outer join (select `//cbo_t2`.key as p, `//cbo_t2`.c_int as q, c_float as r from `//cbo_t2` where (`//cbo_t2`.c_int + 1 == 2) and (`//cbo_t2`.c_int > 0 or `//cbo_t2`.c_float >= 0)) `//cbo_t2` on `c/b/o_t1`.a=p full outer join `cbo_/t3////` on `c/b/o_t1`.a=key where (b + `//cbo_t2`.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0) -POSTHOOK: type: QUERY -POSTHOOK: Input: default@//cbo_t2 -POSTHOOK: Input: default@//cbo_t2@dt=2014 -POSTHOOK: Input: default@c/b/o_t1 -POSTHOOK: Input: default@c/b/o_t1@dt=2014 -POSTHOOK: Input: default@cbo_/t3//// -#### A masked pattern was here #### -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 
1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 
-1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -PREHOOK: query: select * from (select q, b, `//cbo_t2`.p, `c/b/o_t1`.c, `cbo_/t3////`.c_int from (select key as a, c_int as b, `c/b/o_t1`.c_float as c from `c/b/o_t1` where (`c/b/o_t1`.c_int + 1 == 2) and (`c/b/o_t1`.c_int > 0 or `c/b/o_t1`.c_float >= 0)) `c/b/o_t1` full outer join (select `//cbo_t2`.key as p, `//cbo_t2`.c_int as q, c_float as r from `//cbo_t2` where (`//cbo_t2`.c_int + 1 == 2) and (`//cbo_t2`.c_int > 0 or `//cbo_t2`.c_float >= 0)) `//cbo_t2` on `c/b/o_t1`.a=p left outer join `cbo_/t3////` on `c/b/o_t1`.a=key where (b + `//cbo_t2`.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0) -PREHOOK: type: QUERY -PREHOOK: Input: default@//cbo_t2 -PREHOOK: Input: default@//cbo_t2@dt=2014 -PREHOOK: Input: default@c/b/o_t1 -PREHOOK: Input: default@c/b/o_t1@dt=2014 -PREHOOK: Input: default@cbo_/t3//// -#### A masked pattern was here #### -POSTHOOK: query: select * from (select q, b, `//cbo_t2`.p, `c/b/o_t1`.c, `cbo_/t3////`.c_int from (select key as a, c_int as b, `c/b/o_t1`.c_float as c from `c/b/o_t1` where (`c/b/o_t1`.c_int + 1 == 2) and (`c/b/o_t1`.c_int > 0 or `c/b/o_t1`.c_float >= 0)) `c/b/o_t1` full outer join (select `//cbo_t2`.key as p, `//cbo_t2`.c_int as q, c_float as r from `//cbo_t2` where (`//cbo_t2`.c_int + 1 == 2) and (`//cbo_t2`.c_int > 0 or `//cbo_t2`.c_float >= 0)) `//cbo_t2` on `c/b/o_t1`.a=p left outer join `cbo_/t3////` on `c/b/o_t1`.a=key where (b + `//cbo_t2`.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0) -POSTHOOK: type: QUERY -POSTHOOK: Input: default@//cbo_t2 -POSTHOOK: Input: default@//cbo_t2@dt=2014 -POSTHOOK: Input: default@c/b/o_t1 -POSTHOOK: Input: default@c/b/o_t1@dt=2014 -POSTHOOK: Input: default@cbo_/t3//// -#### A masked pattern was here #### -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 
1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 
-1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -PREHOOK: query: select * from (select q, b, `//cbo_t2`.p, `c/b/o_t1`.c, `cbo_/t3////`.c_int from (select key as a, c_int as b, `c/b/o_t1`.c_float as c from `c/b/o_t1` where (`c/b/o_t1`.c_int + 1 == 2) and (`c/b/o_t1`.c_int > 0 or `c/b/o_t1`.c_float >= 0)) `c/b/o_t1` full outer join (select `//cbo_t2`.key as p, `//cbo_t2`.c_int as q, c_float as r from `//cbo_t2` where (`//cbo_t2`.c_int + 1 == 2) and (`//cbo_t2`.c_int > 0 or `//cbo_t2`.c_float >= 0)) `//cbo_t2` on `c/b/o_t1`.a=p right outer join `cbo_/t3////` on `c/b/o_t1`.a=key where (b + `//cbo_t2`.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0) -PREHOOK: type: QUERY -PREHOOK: Input: default@//cbo_t2 -PREHOOK: Input: default@//cbo_t2@dt=2014 -PREHOOK: Input: default@c/b/o_t1 -PREHOOK: Input: 
default@c/b/o_t1@dt=2014 -PREHOOK: Input: default@cbo_/t3//// -#### A masked pattern was here #### -POSTHOOK: query: select * from (select q, b, `//cbo_t2`.p, `c/b/o_t1`.c, `cbo_/t3////`.c_int from (select key as a, c_int as b, `c/b/o_t1`.c_float as c from `c/b/o_t1` where (`c/b/o_t1`.c_int + 1 == 2) and (`c/b/o_t1`.c_int > 0 or `c/b/o_t1`.c_float >= 0)) `c/b/o_t1` full outer join (select `//cbo_t2`.key as p, `//cbo_t2`.c_int as q, c_float as r from `//cbo_t2` where (`//cbo_t2`.c_int + 1 == 2) and (`//cbo_t2`.c_int > 0 or `//cbo_t2`.c_float >= 0)) `//cbo_t2` on `c/b/o_t1`.a=p right outer join `cbo_/t3////` on `c/b/o_t1`.a=key where (b + `//cbo_t2`.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0) -POSTHOOK: type: QUERY -POSTHOOK: Input: default@//cbo_t2 -POSTHOOK: Input: default@//cbo_t2@dt=2014 -POSTHOOK: Input: default@c/b/o_t1 -POSTHOOK: Input: default@c/b/o_t1@dt=2014 -POSTHOOK: Input: default@cbo_/t3//// -#### A masked pattern was here #### -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 
-1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 
1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -PREHOOK: query: select key, (c_int+1)+2 as x, sum(c_int) from `c/b/o_t1` group by c_float, `c/b/o_t1`.c_int, key order by x limit 1 -PREHOOK: type: QUERY -PREHOOK: Input: default@c/b/o_t1 -PREHOOK: Input: default@c/b/o_t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: select key, (c_int+1)+2 as x, sum(c_int) from `c/b/o_t1` group by c_float, `c/b/o_t1`.c_int, key order by x limit 1 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@c/b/o_t1 -POSTHOOK: Input: default@c/b/o_t1@dt=2014 -#### A masked pattern was here #### -NULL NULL NULL -PREHOOK: query: select x, y, count(*) from (select key, (c_int+c_float+1+2) as x, sum(c_int) as y from `c/b/o_t1` group by c_float, `c/b/o_t1`.c_int, key) R group by y, x order by x,y limit 1 -PREHOOK: type: QUERY -PREHOOK: Input: default@c/b/o_t1 -PREHOOK: Input: default@c/b/o_t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: select x, y, count(*) from (select key, (c_int+c_float+1+2) as x, sum(c_int) as y from `c/b/o_t1` group by c_float, `c/b/o_t1`.c_int, key) R group by y, x order by x,y limit 1 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@c/b/o_t1 -POSTHOOK: Input: default@c/b/o_t1@dt=2014 -#### A masked pattern was here #### -NULL NULL 1 -PREHOOK: query: select key from(select key from (select key from `c/b/o_t1` limit 5)`//cbo_t2` limit 5)`cbo_/t3////` limit 5 -PREHOOK: type: QUERY -PREHOOK: Input: default@c/b/o_t1 -PREHOOK: Input: default@c/b/o_t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: select key from(select key from (select key from `c/b/o_t1` limit 5)`//cbo_t2` limit 5)`cbo_/t3////` limit 5 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@c/b/o_t1 -POSTHOOK: Input: default@c/b/o_t1@dt=2014 -#### A masked pattern was here #### -1 -1 -1 -1 -1 -PREHOOK: query: select key, c_int from(select key, c_int from (select key, c_int from `c/b/o_t1` order by c_int limit 5)`c/b/o_t1` order by c_int limit 5)`//cbo_t2` order by c_int limit 5 -PREHOOK: type: QUERY -PREHOOK: Input: default@c/b/o_t1 -PREHOOK: Input: default@c/b/o_t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: select key, c_int from(select key, c_int from (select key, c_int from `c/b/o_t1` order by c_int limit 5)`c/b/o_t1` order by c_int limit 5)`//cbo_t2` order by c_int limit 5 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@c/b/o_t1 -POSTHOOK: Input: default@c/b/o_t1@dt=2014 -#### A masked pattern was here #### -1 1 -1 1 -1 1 -NULL NULL -NULL NULL -PREHOOK: query: select `cbo_/t3////`.c_int, c, count(*) from (select key as a, c_int+1 as b, sum(c_int) as c from `c/b/o_t1` where (`c/b/o_t1`.c_int + 1 >= 0) and (`c/b/o_t1`.c_int > 0 or `c/b/o_t1`.c_float >= 0) group by c_float, `c/b/o_t1`.c_int, key order by a limit 5) `c/b/o_t1` join (select key as p, c_int+1 as q, sum(c_int) as r from 
`//cbo_t2` where (`//cbo_t2`.c_int + 1 >= 0) and (`//cbo_t2`.c_int > 0 or `//cbo_t2`.c_float >= 0) group by c_float, `//cbo_t2`.c_int, key order by q/10 desc, r asc limit 5) `//cbo_t2` on `c/b/o_t1`.a=p join `cbo_/t3////` on `c/b/o_t1`.a=key where (b + `//cbo_t2`.q >= 0) and (b > 0 or c_int >= 0) group by `cbo_/t3////`.c_int, c order by `cbo_/t3////`.c_int+c desc, c limit 5 -PREHOOK: type: QUERY -PREHOOK: Input: default@//cbo_t2 -PREHOOK: Input: default@//cbo_t2@dt=2014 -PREHOOK: Input: default@c/b/o_t1 -PREHOOK: Input: default@c/b/o_t1@dt=2014 -PREHOOK: Input: default@cbo_/t3//// -#### A masked pattern was here #### -POSTHOOK: query: select `cbo_/t3////`.c_int, c, count(*) from (select key as a, c_int+1 as b, sum(c_int) as c from `c/b/o_t1` where (`c/b/o_t1`.c_int + 1 >= 0) and (`c/b/o_t1`.c_int > 0 or `c/b/o_t1`.c_float >= 0) group by c_float, `c/b/o_t1`.c_int, key order by a limit 5) `c/b/o_t1` join (select key as p, c_int+1 as q, sum(c_int) as r from `//cbo_t2` where (`//cbo_t2`.c_int + 1 >= 0) and (`//cbo_t2`.c_int > 0 or `//cbo_t2`.c_float >= 0) group by c_float, `//cbo_t2`.c_int, key order by q/10 desc, r asc limit 5) `//cbo_t2` on `c/b/o_t1`.a=p join `cbo_/t3////` on `c/b/o_t1`.a=key where (b + `//cbo_t2`.q >= 0) and (b > 0 or c_int >= 0) group by `cbo_/t3////`.c_int, c order by `cbo_/t3////`.c_int+c desc, c limit 5 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@//cbo_t2 -POSTHOOK: Input: default@//cbo_t2@dt=2014 -POSTHOOK: Input: default@c/b/o_t1 -POSTHOOK: Input: default@c/b/o_t1@dt=2014 -POSTHOOK: Input: default@cbo_/t3//// -#### A masked pattern was here #### -1 12 6 -1 2 6 -PREHOOK: query: select `cbo_/t3////`.c_int, c, count(*) from (select key as a, c_int+1 as b, sum(c_int) as c from `c/b/o_t1` where (`c/b/o_t1`.c_int + 1 >= 0) and (`c/b/o_t1`.c_int > 0 or `c/b/o_t1`.c_float >= 0) group by c_float, `c/b/o_t1`.c_int, key having `c/b/o_t1`.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by b % c asc, b desc limit 5) `c/b/o_t1` left outer join (select key as p, c_int+1 as q, sum(c_int) as r from `//cbo_t2` where (`//cbo_t2`.c_int + 1 >= 0) and (`//cbo_t2`.c_int > 0 or `//cbo_t2`.c_float >= 0) group by c_float, `//cbo_t2`.c_int, key having `//cbo_t2`.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 limit 5) `//cbo_t2` on `c/b/o_t1`.a=p left outer join `cbo_/t3////` on `c/b/o_t1`.a=key where (b + `//cbo_t2`.q >= 0) and (b > 0 or c_int >= 0) group by `cbo_/t3////`.c_int, c having `cbo_/t3////`.c_int > 0 and (c_int >=1 or c >= 1) and (c_int + c) >= 0 order by `cbo_/t3////`.c_int % c asc, `cbo_/t3////`.c_int, c desc limit 5 -PREHOOK: type: QUERY -PREHOOK: Input: default@//cbo_t2 -PREHOOK: Input: default@//cbo_t2@dt=2014 -PREHOOK: Input: default@c/b/o_t1 -PREHOOK: Input: default@c/b/o_t1@dt=2014 -PREHOOK: Input: default@cbo_/t3//// -#### A masked pattern was here #### -POSTHOOK: query: select `cbo_/t3////`.c_int, c, count(*) from (select key as a, c_int+1 as b, sum(c_int) as c from `c/b/o_t1` where (`c/b/o_t1`.c_int + 1 >= 0) and (`c/b/o_t1`.c_int > 0 or `c/b/o_t1`.c_float >= 0) group by c_float, `c/b/o_t1`.c_int, key having `c/b/o_t1`.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by b % c asc, b desc limit 5) `c/b/o_t1` left outer join (select key as p, c_int+1 as q, sum(c_int) as r from `//cbo_t2` where (`//cbo_t2`.c_int + 1 >= 0) and (`//cbo_t2`.c_int > 0 or `//cbo_t2`.c_float >= 0) group by c_float, `//cbo_t2`.c_int, key having `//cbo_t2`.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + 
c_float) >= 0 limit 5) `//cbo_t2` on `c/b/o_t1`.a=p left outer join `cbo_/t3////` on `c/b/o_t1`.a=key where (b + `//cbo_t2`.q >= 0) and (b > 0 or c_int >= 0) group by `cbo_/t3////`.c_int, c having `cbo_/t3////`.c_int > 0 and (c_int >=1 or c >= 1) and (c_int + c) >= 0 order by `cbo_/t3////`.c_int % c asc, `cbo_/t3////`.c_int, c desc limit 5 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@//cbo_t2 -POSTHOOK: Input: default@//cbo_t2@dt=2014 -POSTHOOK: Input: default@c/b/o_t1 -POSTHOOK: Input: default@c/b/o_t1@dt=2014 -POSTHOOK: Input: default@cbo_/t3//// -#### A masked pattern was here #### -1 12 6 -1 2 6 -PREHOOK: query: select `c/b/o_t1`.c_int from `c/b/o_t1` left semi join `//cbo_t2` on `c/b/o_t1`.key=`//cbo_t2`.key -PREHOOK: type: QUERY -PREHOOK: Input: default@//cbo_t2 -PREHOOK: Input: default@//cbo_t2@dt=2014 -PREHOOK: Input: default@c/b/o_t1 -PREHOOK: Input: default@c/b/o_t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: select `c/b/o_t1`.c_int from `c/b/o_t1` left semi join `//cbo_t2` on `c/b/o_t1`.key=`//cbo_t2`.key -POSTHOOK: type: QUERY -POSTHOOK: Input: default@//cbo_t2 -POSTHOOK: Input: default@//cbo_t2@dt=2014 -POSTHOOK: Input: default@c/b/o_t1 -POSTHOOK: Input: default@c/b/o_t1@dt=2014 -#### A masked pattern was here #### -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -PREHOOK: query: select `c/b/o_t1`.c_int from `c/b/o_t1` left semi join `//cbo_t2` on `c/b/o_t1`.key=`//cbo_t2`.key where (`c/b/o_t1`.c_int + 1 == 2) and (`c/b/o_t1`.c_int > 0 or `c/b/o_t1`.c_float >= 0) -PREHOOK: type: QUERY -PREHOOK: Input: default@//cbo_t2 -PREHOOK: Input: default@//cbo_t2@dt=2014 -PREHOOK: Input: default@c/b/o_t1 -PREHOOK: Input: default@c/b/o_t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: select `c/b/o_t1`.c_int from `c/b/o_t1` left semi join `//cbo_t2` on `c/b/o_t1`.key=`//cbo_t2`.key where (`c/b/o_t1`.c_int + 1 == 2) and (`c/b/o_t1`.c_int > 0 or `c/b/o_t1`.c_float >= 0) -POSTHOOK: type: QUERY -POSTHOOK: Input: default@//cbo_t2 -POSTHOOK: Input: default@//cbo_t2@dt=2014 -POSTHOOK: Input: default@c/b/o_t1 -POSTHOOK: Input: default@c/b/o_t1@dt=2014 -#### A masked pattern was here #### -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -PREHOOK: query: select * from (select c, b, a from (select key as a, c_int as b, `c/b/o_t1`.c_float as c from `c/b/o_t1` where (`c/b/o_t1`.c_int + 1 == 2) and (`c/b/o_t1`.c_int > 0 or `c/b/o_t1`.c_float >= 0)) `c/b/o_t1` left semi join (select `//cbo_t2`.key as p, `//cbo_t2`.c_int as q, c_float as r from `//cbo_t2` where (`//cbo_t2`.c_int + 1 == 2) and (`//cbo_t2`.c_int > 0 or `//cbo_t2`.c_float >= 0)) `//cbo_t2` on `c/b/o_t1`.a=p left semi join `cbo_/t3////` on `c/b/o_t1`.a=key where (b + 1 == 2) and (b > 0 or c >= 0)) R where (b + 1 = 2) and (R.b > 0 or c >= 0) -PREHOOK: type: QUERY -PREHOOK: Input: default@//cbo_t2 -PREHOOK: Input: default@//cbo_t2@dt=2014 -PREHOOK: Input: default@c/b/o_t1 -PREHOOK: Input: default@c/b/o_t1@dt=2014 -PREHOOK: Input: default@cbo_/t3//// -#### A masked pattern was here #### -POSTHOOK: query: select * from (select c, b, a from (select key as a, c_int as b, `c/b/o_t1`.c_float as c from `c/b/o_t1` where (`c/b/o_t1`.c_int + 1 == 2) and (`c/b/o_t1`.c_int > 0 or `c/b/o_t1`.c_float >= 0)) `c/b/o_t1` left semi join (select `//cbo_t2`.key as p, `//cbo_t2`.c_int as q, c_float as r from `//cbo_t2` where (`//cbo_t2`.c_int + 1 == 2) and (`//cbo_t2`.c_int > 0 or `//cbo_t2`.c_float >= 0)) `//cbo_t2` on `c/b/o_t1`.a=p left semi join `cbo_/t3////` on `c/b/o_t1`.a=key where (b + 1 == 2) and (b > 0 or c 
>= 0)) R where (b + 1 = 2) and (R.b > 0 or c >= 0) -POSTHOOK: type: QUERY -POSTHOOK: Input: default@//cbo_t2 -POSTHOOK: Input: default@//cbo_t2@dt=2014 -POSTHOOK: Input: default@c/b/o_t1 -POSTHOOK: Input: default@c/b/o_t1@dt=2014 -POSTHOOK: Input: default@cbo_/t3//// -#### A masked pattern was here #### -1.0 1 1 -1.0 1 1 -1.0 1 1 -1.0 1 1 -1.0 1 1 -1.0 1 1 -1.0 1 1 -1.0 1 1 -1.0 1 1 -1.0 1 1 -1.0 1 1 -1.0 1 1 -1.0 1 1 -1.0 1 1 -1.0 1 1 -1.0 1 1 -1.0 1 1 -1.0 1 1 -PREHOOK: query: select * from (select `cbo_/t3////`.c_int, `c/b/o_t1`.c, b from (select key as a, c_int as b, `c/b/o_t1`.c_float as c from `c/b/o_t1` where (`c/b/o_t1`.c_int + 1 = 2) and (`c/b/o_t1`.c_int > 0 or `c/b/o_t1`.c_float >= 0)) `c/b/o_t1` left semi join (select `//cbo_t2`.key as p, `//cbo_t2`.c_int as q, c_float as r from `//cbo_t2` where (`//cbo_t2`.c_int + 1 == 2) and (`//cbo_t2`.c_int > 0 or `//cbo_t2`.c_float >= 0)) `//cbo_t2` on `c/b/o_t1`.a=p left outer join `cbo_/t3////` on `c/b/o_t1`.a=key where (b + `cbo_/t3////`.c_int == 2) and (b > 0 or c_int >= 0)) R where (R.c_int + 1 = 2) and (R.b > 0 or c_int >= 0) -PREHOOK: type: QUERY -PREHOOK: Input: default@//cbo_t2 -PREHOOK: Input: default@//cbo_t2@dt=2014 -PREHOOK: Input: default@c/b/o_t1 -PREHOOK: Input: default@c/b/o_t1@dt=2014 -PREHOOK: Input: default@cbo_/t3//// -#### A masked pattern was here #### -POSTHOOK: query: select * from (select `cbo_/t3////`.c_int, `c/b/o_t1`.c, b from (select key as a, c_int as b, `c/b/o_t1`.c_float as c from `c/b/o_t1` where (`c/b/o_t1`.c_int + 1 = 2) and (`c/b/o_t1`.c_int > 0 or `c/b/o_t1`.c_float >= 0)) `c/b/o_t1` left semi join (select `//cbo_t2`.key as p, `//cbo_t2`.c_int as q, c_float as r from `//cbo_t2` where (`//cbo_t2`.c_int + 1 == 2) and (`//cbo_t2`.c_int > 0 or `//cbo_t2`.c_float >= 0)) `//cbo_t2` on `c/b/o_t1`.a=p left outer join `cbo_/t3////` on `c/b/o_t1`.a=key where (b + `cbo_/t3////`.c_int == 2) and (b > 0 or c_int >= 0)) R where (R.c_int + 1 = 2) and (R.b > 0 or c_int >= 0) -POSTHOOK: type: QUERY -POSTHOOK: Input: default@//cbo_t2 -POSTHOOK: Input: default@//cbo_t2@dt=2014 -POSTHOOK: Input: default@c/b/o_t1 -POSTHOOK: Input: default@c/b/o_t1@dt=2014 -POSTHOOK: Input: default@cbo_/t3//// -#### A masked pattern was here #### -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -PREHOOK: query: select * from (select c_int, b, `c/b/o_t1`.c from (select key as a, c_int as b, `c/b/o_t1`.c_float as c from `c/b/o_t1` where (`c/b/o_t1`.c_int + 1 == 2) and (`c/b/o_t1`.c_int > 0 or `c/b/o_t1`.c_float >= 0)) `c/b/o_t1` left semi join (select `//cbo_t2`.key as p, `//cbo_t2`.c_int as q, c_float as r from `//cbo_t2` where (`//cbo_t2`.c_int + 1 == 2) and (`//cbo_t2`.c_int > 0 or `//cbo_t2`.c_float >= 0)) `//cbo_t2` on `c/b/o_t1`.a=p right outer join `cbo_/t3////` on `c/b/o_t1`.a=key where (b + 1 == 2) and (b > 0 or c_int >= 0)) R where (c + 1 = 
2) and (R.b > 0 or c_int >= 0) -PREHOOK: type: QUERY -PREHOOK: Input: default@//cbo_t2 -PREHOOK: Input: default@//cbo_t2@dt=2014 -PREHOOK: Input: default@c/b/o_t1 -PREHOOK: Input: default@c/b/o_t1@dt=2014 -PREHOOK: Input: default@cbo_/t3//// -#### A masked pattern was here #### -POSTHOOK: query: select * from (select c_int, b, `c/b/o_t1`.c from (select key as a, c_int as b, `c/b/o_t1`.c_float as c from `c/b/o_t1` where (`c/b/o_t1`.c_int + 1 == 2) and (`c/b/o_t1`.c_int > 0 or `c/b/o_t1`.c_float >= 0)) `c/b/o_t1` left semi join (select `//cbo_t2`.key as p, `//cbo_t2`.c_int as q, c_float as r from `//cbo_t2` where (`//cbo_t2`.c_int + 1 == 2) and (`//cbo_t2`.c_int > 0 or `//cbo_t2`.c_float >= 0)) `//cbo_t2` on `c/b/o_t1`.a=p right outer join `cbo_/t3////` on `c/b/o_t1`.a=key where (b + 1 == 2) and (b > 0 or c_int >= 0)) R where (c + 1 = 2) and (R.b > 0 or c_int >= 0) -POSTHOOK: type: QUERY -POSTHOOK: Input: default@//cbo_t2 -POSTHOOK: Input: default@//cbo_t2@dt=2014 -POSTHOOK: Input: default@c/b/o_t1 -POSTHOOK: Input: default@c/b/o_t1@dt=2014 -POSTHOOK: Input: default@cbo_/t3//// -#### A masked pattern was here #### -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -PREHOOK: query: select * from (select c_int, b, `c/b/o_t1`.c from (select key as a, c_int as b, `c/b/o_t1`.c_float as c from `c/b/o_t1` where (`c/b/o_t1`.c_int + 1 == 2) and (`c/b/o_t1`.c_int > 0 or `c/b/o_t1`.c_float >= 0)) `c/b/o_t1` left semi join (select `//cbo_t2`.key as p, `//cbo_t2`.c_int as q, c_float as r from `//cbo_t2` where (`//cbo_t2`.c_int + 1 == 2) and (`//cbo_t2`.c_int > 0 or `//cbo_t2`.c_float >= 0)) `//cbo_t2` on `c/b/o_t1`.a=p full outer join `cbo_/t3////` on `c/b/o_t1`.a=key where (b + 1 == 2) and (b > 0 or c_int >= 0)) R where (c + 1 = 2) and (R.b > 0 or c_int >= 0) -PREHOOK: type: QUERY -PREHOOK: Input: default@//cbo_t2 -PREHOOK: Input: default@//cbo_t2@dt=2014 -PREHOOK: Input: default@c/b/o_t1 -PREHOOK: Input: default@c/b/o_t1@dt=2014 -PREHOOK: Input: default@cbo_/t3//// -#### A masked pattern was here #### -POSTHOOK: query: select * from (select c_int, b, `c/b/o_t1`.c from (select key as a, c_int as b, `c/b/o_t1`.c_float as c from `c/b/o_t1` where (`c/b/o_t1`.c_int + 1 == 2) and (`c/b/o_t1`.c_int > 0 or `c/b/o_t1`.c_float >= 0)) `c/b/o_t1` left semi join (select `//cbo_t2`.key as p, `//cbo_t2`.c_int as q, c_float as r from `//cbo_t2` where (`//cbo_t2`.c_int + 1 == 2) and (`//cbo_t2`.c_int > 0 or `//cbo_t2`.c_float >= 0)) `//cbo_t2` on `c/b/o_t1`.a=p full outer join `cbo_/t3////` on `c/b/o_t1`.a=key where (b + 1 == 2) and (b > 0 or c_int >= 0)) R where (c + 1 = 2) and (R.b > 0 or c_int >= 0) -POSTHOOK: type: QUERY -POSTHOOK: Input: default@//cbo_t2 -POSTHOOK: Input: default@//cbo_t2@dt=2014 -POSTHOOK: Input: default@c/b/o_t1 -POSTHOOK: Input: default@c/b/o_t1@dt=2014 -POSTHOOK: Input: default@cbo_/t3//// -#### A 
masked pattern was here #### -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -PREHOOK: query: select a, c, count(*) from (select key as a, c_int+1 as b, sum(c_int) as c from `c/b/o_t1` where (`c/b/o_t1`.c_int + 1 >= 0) and (`c/b/o_t1`.c_int > 0 or `c/b/o_t1`.c_float >= 0) group by c_float, `c/b/o_t1`.c_int, key having `c/b/o_t1`.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by a+b desc, c asc) `c/b/o_t1` left semi join (select key as p, c_int+1 as q, sum(c_int) as r from `//cbo_t2` where (`//cbo_t2`.c_int + 1 >= 0) and (`//cbo_t2`.c_int > 0 or `//cbo_t2`.c_float >= 0) group by c_float, `//cbo_t2`.c_int, key having `//cbo_t2`.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by q+r/10 desc, p) `//cbo_t2` on `c/b/o_t1`.a=p left semi join `cbo_/t3////` on `c/b/o_t1`.a=key where (b + 1 >= 0) and (b > 0 or a >= 0) group by a, c having a > 0 and (a >=1 or c >= 1) and (a + c) >= 0 order by c, a -PREHOOK: type: QUERY -PREHOOK: Input: default@//cbo_t2 -PREHOOK: Input: default@//cbo_t2@dt=2014 -PREHOOK: Input: default@c/b/o_t1 -PREHOOK: Input: default@c/b/o_t1@dt=2014 -PREHOOK: Input: default@cbo_/t3//// -#### A masked pattern was here #### -POSTHOOK: query: select a, c, count(*) from (select key as a, c_int+1 as b, sum(c_int) as c from `c/b/o_t1` where (`c/b/o_t1`.c_int + 1 >= 0) and (`c/b/o_t1`.c_int > 0 or `c/b/o_t1`.c_float >= 0) group by c_float, `c/b/o_t1`.c_int, key having `c/b/o_t1`.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by a+b desc, c asc) `c/b/o_t1` left semi join (select key as p, c_int+1 as q, sum(c_int) as r from `//cbo_t2` where (`//cbo_t2`.c_int + 1 >= 0) and (`//cbo_t2`.c_int > 0 or `//cbo_t2`.c_float >= 0) group by c_float, `//cbo_t2`.c_int, key having `//cbo_t2`.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by q+r/10 desc, p) `//cbo_t2` on `c/b/o_t1`.a=p left semi join `cbo_/t3////` on `c/b/o_t1`.a=key where (b + 1 >= 0) and (b > 0 or a >= 0) group by a, c having a > 0 and (a >=1 or c >= 1) and (a + c) >= 0 order by c, a -POSTHOOK: type: QUERY -POSTHOOK: Input: default@//cbo_t2 -POSTHOOK: Input: default@//cbo_t2@dt=2014 -POSTHOOK: Input: default@c/b/o_t1 -POSTHOOK: Input: default@c/b/o_t1@dt=2014 -POSTHOOK: Input: default@cbo_/t3//// -#### A masked pattern was here #### - 1 2 1 - 1 2 1 -1 12 1 -1 2 1 -PREHOOK: query: select a, c, count(*) from (select key as a, c_int+1 as b, sum(c_int) as c from `c/b/o_t1` where (`c/b/o_t1`.c_int + 1 >= 0) and (`c/b/o_t1`.c_int > 0 or `c/b/o_t1`.c_float >= 0) group by c_float, `c/b/o_t1`.c_int, key having `c/b/o_t1`.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by a+b desc, c asc limit 5) `c/b/o_t1` left semi join (select key as p, c_int+1 as q, sum(c_int) as r from `//cbo_t2` where 
(`//cbo_t2`.c_int + 1 >= 0) and (`//cbo_t2`.c_int > 0 or `//cbo_t2`.c_float >= 0) group by c_float, `//cbo_t2`.c_int, key having `//cbo_t2`.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by q+r/10 desc, p limit 5) `//cbo_t2` on `c/b/o_t1`.a=p left semi join `cbo_/t3////` on `c/b/o_t1`.a=key where (b + 1 >= 0) and (b > 0 or a >= 0) group by a, c having a > 0 and (a >=1 or c >= 1) and (a + c) >= 0 order by c, a -PREHOOK: type: QUERY -PREHOOK: Input: default@//cbo_t2 -PREHOOK: Input: default@//cbo_t2@dt=2014 -PREHOOK: Input: default@c/b/o_t1 -PREHOOK: Input: default@c/b/o_t1@dt=2014 -PREHOOK: Input: default@cbo_/t3//// -#### A masked pattern was here #### -POSTHOOK: query: select a, c, count(*) from (select key as a, c_int+1 as b, sum(c_int) as c from `c/b/o_t1` where (`c/b/o_t1`.c_int + 1 >= 0) and (`c/b/o_t1`.c_int > 0 or `c/b/o_t1`.c_float >= 0) group by c_float, `c/b/o_t1`.c_int, key having `c/b/o_t1`.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by a+b desc, c asc limit 5) `c/b/o_t1` left semi join (select key as p, c_int+1 as q, sum(c_int) as r from `//cbo_t2` where (`//cbo_t2`.c_int + 1 >= 0) and (`//cbo_t2`.c_int > 0 or `//cbo_t2`.c_float >= 0) group by c_float, `//cbo_t2`.c_int, key having `//cbo_t2`.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by q+r/10 desc, p limit 5) `//cbo_t2` on `c/b/o_t1`.a=p left semi join `cbo_/t3////` on `c/b/o_t1`.a=key where (b + 1 >= 0) and (b > 0 or a >= 0) group by a, c having a > 0 and (a >=1 or c >= 1) and (a + c) >= 0 order by c, a -POSTHOOK: type: QUERY -POSTHOOK: Input: default@//cbo_t2 -POSTHOOK: Input: default@//cbo_t2@dt=2014 -POSTHOOK: Input: default@c/b/o_t1 -POSTHOOK: Input: default@c/b/o_t1@dt=2014 -POSTHOOK: Input: default@cbo_/t3//// -#### A masked pattern was here #### - 1 2 1 - 1 2 1 -1 12 1 -1 2 1 -PREHOOK: query: select * from `c/b/o_t1` -PREHOOK: type: QUERY -PREHOOK: Input: default@c/b/o_t1 -PREHOOK: Input: default@c/b/o_t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: select * from `c/b/o_t1` -POSTHOOK: type: QUERY -POSTHOOK: Input: default@c/b/o_t1 -POSTHOOK: Input: default@c/b/o_t1@dt=2014 -#### A masked pattern was here #### - 1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 -1 1 1 1.0 false 2014 -1 1 1 1.0 false 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -NULL NULL NULL NULL NULL 2014 -NULL NULL NULL NULL NULL 2014 -PREHOOK: query: select * from `c/b/o_t1` as `c/b/o_t1` -PREHOOK: type: QUERY -PREHOOK: Input: default@c/b/o_t1 -PREHOOK: Input: default@c/b/o_t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: select * from `c/b/o_t1` as `c/b/o_t1` -POSTHOOK: type: QUERY -POSTHOOK: Input: default@c/b/o_t1 -POSTHOOK: Input: default@c/b/o_t1@dt=2014 -#### A masked pattern was here #### - 1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 -1 1 1 1.0 false 2014 -1 1 1 1.0 false 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -NULL NULL NULL NULL NULL 2014 -NULL NULL NULL NULL NULL 2014 -PREHOOK: query: select * from `c/b/o_t1` 
as `//cbo_t2` -PREHOOK: type: QUERY -PREHOOK: Input: default@c/b/o_t1 -PREHOOK: Input: default@c/b/o_t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: select * from `c/b/o_t1` as `//cbo_t2` -POSTHOOK: type: QUERY -POSTHOOK: Input: default@c/b/o_t1 -POSTHOOK: Input: default@c/b/o_t1@dt=2014 -#### A masked pattern was here #### - 1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 -1 1 1 1.0 false 2014 -1 1 1 1.0 false 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -NULL NULL NULL NULL NULL 2014 -NULL NULL NULL NULL NULL 2014 -PREHOOK: query: select `c/b/o_t1`.key as x, c_int as c_int, (((c_int+c_float)*10)+5) as y from `c/b/o_t1` -PREHOOK: type: QUERY -PREHOOK: Input: default@c/b/o_t1 -PREHOOK: Input: default@c/b/o_t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: select `c/b/o_t1`.key as x, c_int as c_int, (((c_int+c_float)*10)+5) as y from `c/b/o_t1` -POSTHOOK: type: QUERY -POSTHOOK: Input: default@c/b/o_t1 -POSTHOOK: Input: default@c/b/o_t1@dt=2014 -#### A masked pattern was here #### - 1 1 25.0 - 1 1 25.0 - 1 1 25.0 - 1 1 25.0 -1 1 25.0 -1 1 25.0 -1 1 25.0 -1 1 25.0 -1 1 25.0 -1 1 25.0 -1 1 25.0 -1 1 25.0 -1 1 25.0 -1 1 25.0 -1 1 25.0 -1 1 25.0 -1 1 25.0 -1 1 25.0 -NULL NULL NULL -NULL NULL NULL -PREHOOK: query: select * from `c/b/o_t1` where (((key=1) and (c_float=10)) and (c_int=20)) -PREHOOK: type: QUERY -PREHOOK: Input: default@c/b/o_t1 -PREHOOK: Input: default@c/b/o_t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: select * from `c/b/o_t1` where (((key=1) and (c_float=10)) and (c_int=20)) -POSTHOOK: type: QUERY -POSTHOOK: Input: default@c/b/o_t1 -POSTHOOK: Input: default@c/b/o_t1@dt=2014 -#### A masked pattern was here #### -PREHOOK: query: select * from `c/b/o_t1` where `c/b/o_t1`.c_int >= 0 -PREHOOK: type: QUERY -PREHOOK: Input: default@c/b/o_t1 -PREHOOK: Input: default@c/b/o_t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: select * from `c/b/o_t1` where `c/b/o_t1`.c_int >= 0 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@c/b/o_t1 -POSTHOOK: Input: default@c/b/o_t1@dt=2014 -#### A masked pattern was here #### - 1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 -1 1 1 1.0 false 2014 -1 1 1 1.0 false 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -PREHOOK: query: select * from `c/b/o_t1` as `c/b/o_t1` where `c/b/o_t1`.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100 -PREHOOK: type: QUERY -PREHOOK: Input: default@c/b/o_t1 -PREHOOK: Input: default@c/b/o_t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: select * from `c/b/o_t1` as `c/b/o_t1` where `c/b/o_t1`.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@c/b/o_t1 -POSTHOOK: Input: default@c/b/o_t1@dt=2014 -#### A masked pattern was here #### - 1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 -1 1 1 1.0 false 2014 -1 1 1 1.0 false 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 
-1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -PREHOOK: query: select * from `c/b/o_t1` as `//cbo_t2` where `//cbo_t2`.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100 -PREHOOK: type: QUERY -PREHOOK: Input: default@c/b/o_t1 -PREHOOK: Input: default@c/b/o_t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: select * from `c/b/o_t1` as `//cbo_t2` where `//cbo_t2`.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@c/b/o_t1 -POSTHOOK: Input: default@c/b/o_t1@dt=2014 -#### A masked pattern was here #### - 1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 -1 1 1 1.0 false 2014 -1 1 1 1.0 false 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -PREHOOK: query: select `//cbo_t2`.key as x, c_int as c_int, (((c_int+c_float)*10)+5) as y from `c/b/o_t1` as `//cbo_t2` where `//cbo_t2`.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100 -PREHOOK: type: QUERY -PREHOOK: Input: default@c/b/o_t1 -PREHOOK: Input: default@c/b/o_t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: select `//cbo_t2`.key as x, c_int as c_int, (((c_int+c_float)*10)+5) as y from `c/b/o_t1` as `//cbo_t2` where `//cbo_t2`.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@c/b/o_t1 -POSTHOOK: Input: default@c/b/o_t1@dt=2014 -#### A masked pattern was here #### - 1 1 25.0 - 1 1 25.0 - 1 1 25.0 - 1 1 25.0 -1 1 25.0 -1 1 25.0 -1 1 25.0 -1 1 25.0 -1 1 25.0 -1 1 25.0 -1 1 25.0 -1 1 25.0 -1 1 25.0 -1 1 25.0 -1 1 25.0 -1 1 25.0 -1 1 25.0 -1 1 25.0 -PREHOOK: query: select * from (select * from `c/b/o_t1` where `c/b/o_t1`.c_int >= 0) as `c/b/o_t1` -PREHOOK: type: QUERY -PREHOOK: Input: default@c/b/o_t1 -PREHOOK: Input: default@c/b/o_t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: select * from (select * from `c/b/o_t1` where `c/b/o_t1`.c_int >= 0) as `c/b/o_t1` -POSTHOOK: type: QUERY -POSTHOOK: Input: default@c/b/o_t1 -POSTHOOK: Input: default@c/b/o_t1@dt=2014 -#### A masked pattern was here #### - 1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 -1 1 1 1.0 false 2014 -1 1 1 1.0 false 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -PREHOOK: query: select * from (select * from `c/b/o_t1` as `c/b/o_t1` where `c/b/o_t1`.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as `c/b/o_t1` -PREHOOK: type: QUERY -PREHOOK: Input: default@c/b/o_t1 -PREHOOK: Input: default@c/b/o_t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: select * from (select * from `c/b/o_t1` as `c/b/o_t1` where `c/b/o_t1`.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as `c/b/o_t1` -POSTHOOK: type: QUERY -POSTHOOK: Input: default@c/b/o_t1 -POSTHOOK: Input: default@c/b/o_t1@dt=2014 -#### A masked pattern was here #### - 1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 -1 1 1 1.0 false 2014 -1 1 1 1.0 false 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 
1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -PREHOOK: query: select * from (select * from `c/b/o_t1` as `//cbo_t2` where `//cbo_t2`.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as `c/b/o_t1` -PREHOOK: type: QUERY -PREHOOK: Input: default@c/b/o_t1 -PREHOOK: Input: default@c/b/o_t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: select * from (select * from `c/b/o_t1` as `//cbo_t2` where `//cbo_t2`.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as `c/b/o_t1` -POSTHOOK: type: QUERY -POSTHOOK: Input: default@c/b/o_t1 -POSTHOOK: Input: default@c/b/o_t1@dt=2014 -#### A masked pattern was here #### - 1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 -1 1 1 1.0 false 2014 -1 1 1 1.0 false 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -PREHOOK: query: select * from (select `//cbo_t2`.key as x, c_int as c_int, (((c_int+c_float)*10)+5) as y from `c/b/o_t1` as `//cbo_t2` where `//cbo_t2`.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as `c/b/o_t1` -PREHOOK: type: QUERY -PREHOOK: Input: default@c/b/o_t1 -PREHOOK: Input: default@c/b/o_t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: select * from (select `//cbo_t2`.key as x, c_int as c_int, (((c_int+c_float)*10)+5) as y from `c/b/o_t1` as `//cbo_t2` where `//cbo_t2`.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as `c/b/o_t1` -POSTHOOK: type: QUERY -POSTHOOK: Input: default@c/b/o_t1 -POSTHOOK: Input: default@c/b/o_t1@dt=2014 -#### A masked pattern was here #### - 1 1 25.0 - 1 1 25.0 - 1 1 25.0 - 1 1 25.0 -1 1 25.0 -1 1 25.0 -1 1 25.0 -1 1 25.0 -1 1 25.0 -1 1 25.0 -1 1 25.0 -1 1 25.0 -1 1 25.0 -1 1 25.0 -1 1 25.0 -1 1 25.0 -1 1 25.0 -1 1 25.0 -PREHOOK: query: select * from (select * from `c/b/o_t1` where `c/b/o_t1`.c_int >= 0) as `c/b/o_t1` where `c/b/o_t1`.c_int >= 0 -PREHOOK: type: QUERY -PREHOOK: Input: default@c/b/o_t1 -PREHOOK: Input: default@c/b/o_t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: select * from (select * from `c/b/o_t1` where `c/b/o_t1`.c_int >= 0) as `c/b/o_t1` where `c/b/o_t1`.c_int >= 0 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@c/b/o_t1 -POSTHOOK: Input: default@c/b/o_t1@dt=2014 -#### A masked pattern was here #### - 1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 -1 1 1 1.0 false 2014 -1 1 1 1.0 false 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -PREHOOK: query: select * from (select * from `c/b/o_t1` as `c/b/o_t1` where `c/b/o_t1`.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as `c/b/o_t1` where `c/b/o_t1`.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100 -PREHOOK: type: QUERY -PREHOOK: Input: default@c/b/o_t1 -PREHOOK: Input: default@c/b/o_t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: select * from (select * from `c/b/o_t1` as `c/b/o_t1` where `c/b/o_t1`.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as `c/b/o_t1` where `c/b/o_t1`.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@c/b/o_t1 
-POSTHOOK: Input: default@c/b/o_t1@dt=2014 -#### A masked pattern was here #### - 1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 -1 1 1 1.0 false 2014 -1 1 1 1.0 false 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -PREHOOK: query: select * from (select * from `c/b/o_t1` as `//cbo_t2` where `//cbo_t2`.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as `//cbo_t2` where `//cbo_t2`.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100 -PREHOOK: type: QUERY -PREHOOK: Input: default@c/b/o_t1 -PREHOOK: Input: default@c/b/o_t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: select * from (select * from `c/b/o_t1` as `//cbo_t2` where `//cbo_t2`.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as `//cbo_t2` where `//cbo_t2`.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@c/b/o_t1 -POSTHOOK: Input: default@c/b/o_t1@dt=2014 -#### A masked pattern was here #### - 1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 -1 1 1 1.0 false 2014 -1 1 1 1.0 false 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -PREHOOK: query: select * from (select `//cbo_t2`.key as x, c_int as c_int, (((c_int+c_float)*10)+5) as y from `c/b/o_t1` as `//cbo_t2` where `//cbo_t2`.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as `c/b/o_t1` where `c/b/o_t1`.c_int >= 0 and y+c_int >= 0 or x <= 100 -PREHOOK: type: QUERY -PREHOOK: Input: default@c/b/o_t1 -PREHOOK: Input: default@c/b/o_t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: select * from (select `//cbo_t2`.key as x, c_int as c_int, (((c_int+c_float)*10)+5) as y from `c/b/o_t1` as `//cbo_t2` where `//cbo_t2`.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as `c/b/o_t1` where `c/b/o_t1`.c_int >= 0 and y+c_int >= 0 or x <= 100 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@c/b/o_t1 -POSTHOOK: Input: default@c/b/o_t1@dt=2014 -#### A masked pattern was here #### - 1 1 25.0 - 1 1 25.0 - 1 1 25.0 - 1 1 25.0 -1 1 25.0 -1 1 25.0 -1 1 25.0 -1 1 25.0 -1 1 25.0 -1 1 25.0 -1 1 25.0 -1 1 25.0 -1 1 25.0 -1 1 25.0 -1 1 25.0 -1 1 25.0 -1 1 25.0 -1 1 25.0 -PREHOOK: query: select `c/b/o_t1`.c_int+c_float as x , c_int as c_int, (((c_int+c_float)*10)+5) as y from (select * from `c/b/o_t1` where `c/b/o_t1`.c_int >= 0) as `c/b/o_t1` where `c/b/o_t1`.c_int >= 0 -PREHOOK: type: QUERY -PREHOOK: Input: default@c/b/o_t1 -PREHOOK: Input: default@c/b/o_t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: select `c/b/o_t1`.c_int+c_float as x , c_int as c_int, (((c_int+c_float)*10)+5) as y from (select * from `c/b/o_t1` where `c/b/o_t1`.c_int >= 0) as `c/b/o_t1` where `c/b/o_t1`.c_int >= 0 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@c/b/o_t1 -POSTHOOK: Input: default@c/b/o_t1@dt=2014 -#### A masked pattern was here #### -2.0 1 25.0 -2.0 1 25.0 -2.0 1 25.0 -2.0 1 25.0 -2.0 1 25.0 -2.0 1 25.0 -2.0 1 25.0 -2.0 1 25.0 -2.0 1 25.0 -2.0 1 25.0 -2.0 1 25.0 -2.0 1 25.0 -2.0 1 25.0 -2.0 1 25.0 -2.0 1 25.0 -2.0 1 25.0 -2.0 1 25.0 -2.0 1 25.0 -PREHOOK: query: select `//cbo_t2`.c_int+c_float as x , c_int as c_int, 
(((c_int+c_float)*10)+5) as y from (select * from `c/b/o_t1` where `c/b/o_t1`.c_int >= 0) as `//cbo_t2` where `//cbo_t2`.c_int >= 0 -PREHOOK: type: QUERY -PREHOOK: Input: default@c/b/o_t1 -PREHOOK: Input: default@c/b/o_t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: select `//cbo_t2`.c_int+c_float as x , c_int as c_int, (((c_int+c_float)*10)+5) as y from (select * from `c/b/o_t1` where `c/b/o_t1`.c_int >= 0) as `//cbo_t2` where `//cbo_t2`.c_int >= 0 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@c/b/o_t1 -POSTHOOK: Input: default@c/b/o_t1@dt=2014 -#### A masked pattern was here #### -2.0 1 25.0 -2.0 1 25.0 -2.0 1 25.0 -2.0 1 25.0 -2.0 1 25.0 -2.0 1 25.0 -2.0 1 25.0 -2.0 1 25.0 -2.0 1 25.0 -2.0 1 25.0 -2.0 1 25.0 -2.0 1 25.0 -2.0 1 25.0 -2.0 1 25.0 -2.0 1 25.0 -2.0 1 25.0 -2.0 1 25.0 -2.0 1 25.0 -PREHOOK: query: select * from (select * from `c/b/o_t1` where `c/b/o_t1`.c_int >= 0) as `c/b/o_t1` where `c/b/o_t1`.c_int >= 0 -PREHOOK: type: QUERY -PREHOOK: Input: default@c/b/o_t1 -PREHOOK: Input: default@c/b/o_t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: select * from (select * from `c/b/o_t1` where `c/b/o_t1`.c_int >= 0) as `c/b/o_t1` where `c/b/o_t1`.c_int >= 0 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@c/b/o_t1 -POSTHOOK: Input: default@c/b/o_t1@dt=2014 -#### A masked pattern was here #### - 1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 -1 1 1 1.0 false 2014 -1 1 1 1.0 false 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -PREHOOK: query: select * from (select * from `c/b/o_t1` as `c/b/o_t1` where `c/b/o_t1`.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as `c/b/o_t1` where `c/b/o_t1`.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100 -PREHOOK: type: QUERY -PREHOOK: Input: default@c/b/o_t1 -PREHOOK: Input: default@c/b/o_t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: select * from (select * from `c/b/o_t1` as `c/b/o_t1` where `c/b/o_t1`.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as `c/b/o_t1` where `c/b/o_t1`.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@c/b/o_t1 -POSTHOOK: Input: default@c/b/o_t1@dt=2014 -#### A masked pattern was here #### - 1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 -1 1 1 1.0 false 2014 -1 1 1 1.0 false 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -PREHOOK: query: select * from (select * from `c/b/o_t1` as `//cbo_t2` where `//cbo_t2`.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as `//cbo_t2` where `//cbo_t2`.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100 -PREHOOK: type: QUERY -PREHOOK: Input: default@c/b/o_t1 -PREHOOK: Input: default@c/b/o_t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: select * from (select * from `c/b/o_t1` as `//cbo_t2` where `//cbo_t2`.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as `//cbo_t2` where `//cbo_t2`.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@c/b/o_t1 -POSTHOOK: Input: default@c/b/o_t1@dt=2014 -#### A masked pattern 
was here #### - 1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 -1 1 1 1.0 false 2014 -1 1 1 1.0 false 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -PREHOOK: query: select * from (select `//cbo_t2`.key as x, c_int as c_int, (((c_int+c_float)*10)+5) as y from `c/b/o_t1` as `//cbo_t2` where `//cbo_t2`.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as `c/b/o_t1` where `c/b/o_t1`.c_int >= 0 and y+c_int >= 0 or x <= 100 -PREHOOK: type: QUERY -PREHOOK: Input: default@c/b/o_t1 -PREHOOK: Input: default@c/b/o_t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: select * from (select `//cbo_t2`.key as x, c_int as c_int, (((c_int+c_float)*10)+5) as y from `c/b/o_t1` as `//cbo_t2` where `//cbo_t2`.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as `c/b/o_t1` where `c/b/o_t1`.c_int >= 0 and y+c_int >= 0 or x <= 100 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@c/b/o_t1 -POSTHOOK: Input: default@c/b/o_t1@dt=2014 -#### A masked pattern was here #### - 1 1 25.0 - 1 1 25.0 - 1 1 25.0 - 1 1 25.0 -1 1 25.0 -1 1 25.0 -1 1 25.0 -1 1 25.0 -1 1 25.0 -1 1 25.0 -1 1 25.0 -1 1 25.0 -1 1 25.0 -1 1 25.0 -1 1 25.0 -1 1 25.0 -1 1 25.0 -1 1 25.0 -PREHOOK: query: select `c/b/o_t1`.c_int+c_float as x , c_int as c_int, (((c_int+c_float)*10)+5) as y from (select * from `c/b/o_t1` where `c/b/o_t1`.c_int >= 0) as `c/b/o_t1` where `c/b/o_t1`.c_int >= 0 -PREHOOK: type: QUERY -PREHOOK: Input: default@c/b/o_t1 -PREHOOK: Input: default@c/b/o_t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: select `c/b/o_t1`.c_int+c_float as x , c_int as c_int, (((c_int+c_float)*10)+5) as y from (select * from `c/b/o_t1` where `c/b/o_t1`.c_int >= 0) as `c/b/o_t1` where `c/b/o_t1`.c_int >= 0 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@c/b/o_t1 -POSTHOOK: Input: default@c/b/o_t1@dt=2014 -#### A masked pattern was here #### -2.0 1 25.0 -2.0 1 25.0 -2.0 1 25.0 -2.0 1 25.0 -2.0 1 25.0 -2.0 1 25.0 -2.0 1 25.0 -2.0 1 25.0 -2.0 1 25.0 -2.0 1 25.0 -2.0 1 25.0 -2.0 1 25.0 -2.0 1 25.0 -2.0 1 25.0 -2.0 1 25.0 -2.0 1 25.0 -2.0 1 25.0 -2.0 1 25.0 -PREHOOK: query: select `//cbo_t2`.c_int+c_float as x , c_int as c_int, (((c_int+c_float)*10)+5) as y from (select * from `c/b/o_t1` where `c/b/o_t1`.c_int >= 0) as `//cbo_t2` where `//cbo_t2`.c_int >= 0 -PREHOOK: type: QUERY -PREHOOK: Input: default@c/b/o_t1 -PREHOOK: Input: default@c/b/o_t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: select `//cbo_t2`.c_int+c_float as x , c_int as c_int, (((c_int+c_float)*10)+5) as y from (select * from `c/b/o_t1` where `c/b/o_t1`.c_int >= 0) as `//cbo_t2` where `//cbo_t2`.c_int >= 0 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@c/b/o_t1 -POSTHOOK: Input: default@c/b/o_t1@dt=2014 -#### A masked pattern was here #### -2.0 1 25.0 -2.0 1 25.0 -2.0 1 25.0 -2.0 1 25.0 -2.0 1 25.0 -2.0 1 25.0 -2.0 1 25.0 -2.0 1 25.0 -2.0 1 25.0 -2.0 1 25.0 -2.0 1 25.0 -2.0 1 25.0 -2.0 1 25.0 -2.0 1 25.0 -2.0 1 25.0 -2.0 1 25.0 -2.0 1 25.0 -2.0 1 25.0 -PREHOOK: query: select null from `cbo_/t3////` -PREHOOK: type: QUERY -PREHOOK: Input: default@cbo_/t3//// -#### A masked pattern was here #### -POSTHOOK: query: select null from `cbo_/t3////` -POSTHOOK: type: QUERY -POSTHOOK: Input: default@cbo_/t3//// -#### A masked pattern was here #### -NULL -NULL -NULL -NULL -NULL -NULL -NULL -NULL -NULL 
-NULL -NULL -NULL -NULL -NULL -NULL -NULL -NULL -NULL -NULL -NULL -PREHOOK: query: select key from `c/b/o_t1` where c_int = -6 or c_int = +6 -PREHOOK: type: QUERY -PREHOOK: Input: default@c/b/o_t1 -PREHOOK: Input: default@c/b/o_t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: select key from `c/b/o_t1` where c_int = -6 or c_int = +6 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@c/b/o_t1 -POSTHOOK: Input: default@c/b/o_t1@dt=2014 -#### A masked pattern was here #### -PREHOOK: query: select count(`c/b/o_t1`.dt) from `c/b/o_t1` join `//cbo_t2` on `c/b/o_t1`.dt = `//cbo_t2`.dt where `c/b/o_t1`.dt = '2014' -PREHOOK: type: QUERY -PREHOOK: Input: default@//cbo_t2 -PREHOOK: Input: default@//cbo_t2@dt=2014 -PREHOOK: Input: default@c/b/o_t1 -PREHOOK: Input: default@c/b/o_t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: select count(`c/b/o_t1`.dt) from `c/b/o_t1` join `//cbo_t2` on `c/b/o_t1`.dt = `//cbo_t2`.dt where `c/b/o_t1`.dt = '2014' -POSTHOOK: type: QUERY -POSTHOOK: Input: default@//cbo_t2 -POSTHOOK: Input: default@//cbo_t2@dt=2014 -POSTHOOK: Input: default@c/b/o_t1 -POSTHOOK: Input: default@c/b/o_t1@dt=2014 -#### A masked pattern was here #### -400 -PREHOOK: query: select `c/b/o_t1`.value from `c/b/o_t1` join `//cbo_t2` on `c/b/o_t1`.key = `//cbo_t2`.key where `c/b/o_t1`.dt = '10' and `c/b/o_t1`.c_boolean = true -PREHOOK: type: QUERY -PREHOOK: Input: default@//cbo_t2 -PREHOOK: Input: default@//cbo_t2@dt=2014 -PREHOOK: Input: default@c/b/o_t1 -#### A masked pattern was here #### -POSTHOOK: query: select `c/b/o_t1`.value from `c/b/o_t1` join `//cbo_t2` on `c/b/o_t1`.key = `//cbo_t2`.key where `c/b/o_t1`.dt = '10' and `c/b/o_t1`.c_boolean = true -POSTHOOK: type: QUERY -POSTHOOK: Input: default@//cbo_t2 -POSTHOOK: Input: default@//cbo_t2@dt=2014 -POSTHOOK: Input: default@c/b/o_t1 -#### A masked pattern was here #### -PREHOOK: query: select * - -from `src/_/cbo` b - -where not exists - - (select distinct a.key - - from `src/_/cbo` a - - where b.value = a.value and a.value > 'val_2' - - ) -PREHOOK: type: QUERY -PREHOOK: Input: default@src/_/cbo -#### A masked pattern was here #### -POSTHOOK: query: select * - -from `src/_/cbo` b - -where not exists - - (select distinct a.key - - from `src/_/cbo` a - - where b.value = a.value and a.value > 'val_2' - - ) -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src/_/cbo -#### A masked pattern was here #### -0 val_0 -0 val_0 -0 val_0 -10 val_10 -100 val_100 -100 val_100 -103 val_103 -103 val_103 -104 val_104 -104 val_104 -105 val_105 -11 val_11 -111 val_111 -113 val_113 -113 val_113 -114 val_114 -116 val_116 -118 val_118 -118 val_118 -119 val_119 -119 val_119 -119 val_119 -12 val_12 -12 val_12 -120 val_120 -120 val_120 -125 val_125 -125 val_125 -126 val_126 -128 val_128 -128 val_128 -128 val_128 -129 val_129 -129 val_129 -131 val_131 -133 val_133 -134 val_134 -134 val_134 -136 val_136 -137 val_137 -137 val_137 -138 val_138 -138 val_138 -138 val_138 -138 val_138 -143 val_143 -145 val_145 -146 val_146 -146 val_146 -149 val_149 -149 val_149 -15 val_15 -15 val_15 -150 val_150 -152 val_152 -152 val_152 -153 val_153 -155 val_155 -156 val_156 -157 val_157 -158 val_158 -160 val_160 -162 val_162 -163 val_163 -164 val_164 -164 val_164 -165 val_165 -165 val_165 -166 val_166 -167 val_167 -167 val_167 -167 val_167 -168 val_168 -169 val_169 -169 val_169 -169 val_169 -169 val_169 -17 val_17 -170 val_170 -172 val_172 -172 val_172 -174 val_174 -174 val_174 -175 val_175 -175 val_175 -176 val_176 -176 val_176 -177 val_177 -178 val_178 -179 
val_179 -179 val_179 -18 val_18 -18 val_18 -180 val_180 -181 val_181 -183 val_183 -186 val_186 -187 val_187 -187 val_187 -187 val_187 -189 val_189 -19 val_19 -190 val_190 -191 val_191 -191 val_191 -192 val_192 -193 val_193 -193 val_193 -193 val_193 -194 val_194 -195 val_195 -195 val_195 -196 val_196 -197 val_197 -197 val_197 -199 val_199 -199 val_199 -199 val_199 -2 val_2 -PREHOOK: query: select * - -from `src/_/cbo` b - -group by key, value - -having not exists - - (select a.key - - from `src/_/cbo` a - - where b.value = a.value and a.key = b.key and a.value > 'val_12' - - ) -PREHOOK: type: QUERY -PREHOOK: Input: default@src/_/cbo -#### A masked pattern was here #### -POSTHOOK: query: select * - -from `src/_/cbo` b - -group by key, value - -having not exists - - (select a.key - - from `src/_/cbo` a - - where b.value = a.value and a.key = b.key and a.value > 'val_12' - - ) -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src/_/cbo -#### A masked pattern was here #### -0 val_0 -10 val_10 -100 val_100 -103 val_103 -104 val_104 -105 val_105 -11 val_11 -111 val_111 -113 val_113 -114 val_114 -116 val_116 -118 val_118 -119 val_119 -12 val_12 -PREHOOK: query: create view cv1 as - -select * - -from `src/_/cbo` b - -where exists - - (select a.key - - from `src/_/cbo` a - - where b.value = a.value and a.key = b.key and a.value > 'val_9') -PREHOOK: type: CREATEVIEW -PREHOOK: Input: default@src/_/cbo -PREHOOK: Output: database:default -PREHOOK: Output: default@cv1 -POSTHOOK: query: create view cv1 as - -select * - -from `src/_/cbo` b - -where exists - - (select a.key - - from `src/_/cbo` a - - where b.value = a.value and a.key = b.key and a.value > 'val_9') -POSTHOOK: type: CREATEVIEW -POSTHOOK: Input: default@src/_/cbo -POSTHOOK: Output: database:default -POSTHOOK: Output: default@cv1 -POSTHOOK: Lineage: cv1.key SIMPLE [(src/_/cbo)b.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: cv1.value SIMPLE [(src/_/cbo)b.FieldSchema(name:value, type:string, comment:null), ] -PREHOOK: query: select * from cv1 -PREHOOK: type: QUERY -PREHOOK: Input: default@cv1 -PREHOOK: Input: default@src/_/cbo -#### A masked pattern was here #### -POSTHOOK: query: select * from cv1 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@cv1 -POSTHOOK: Input: default@src/_/cbo -#### A masked pattern was here #### -90 val_90 -90 val_90 -90 val_90 -92 val_92 -95 val_95 -95 val_95 -96 val_96 -97 val_97 -97 val_97 -98 val_98 -98 val_98 -PREHOOK: query: select * - -from (select * - - from `src/_/cbo` b - - where exists - - (select a.key - - from `src/_/cbo` a - - where b.value = a.value and a.key = b.key and a.value > 'val_9') - - ) a -PREHOOK: type: QUERY -PREHOOK: Input: default@src/_/cbo -#### A masked pattern was here #### -POSTHOOK: query: select * - -from (select * - - from `src/_/cbo` b - - where exists - - (select a.key - - from `src/_/cbo` a - - where b.value = a.value and a.key = b.key and a.value > 'val_9') - - ) a -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src/_/cbo -#### A masked pattern was here #### -90 val_90 -90 val_90 -90 val_90 -92 val_92 -95 val_95 -95 val_95 -96 val_96 -97 val_97 -97 val_97 -98 val_98 -98 val_98 -PREHOOK: query: select * - -from (select b.key, count(*) - - from `src/_/cbo` b - - group by b.key - - having exists - - (select a.key - - from `src/_/cbo` a - - where a.key = b.key and a.value > 'val_9' - - ) - -) a -PREHOOK: type: QUERY -PREHOOK: Input: default@src/_/cbo -#### A masked pattern was here #### -POSTHOOK: query: select * - -from (select b.key, count(*) - - from 
`src/_/cbo` b - - group by b.key - - having exists - - (select a.key - - from `src/_/cbo` a - - where a.key = b.key and a.value > 'val_9' - - ) - -) a -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src/_/cbo -#### A masked pattern was here #### -90 3 -92 1 -95 2 -96 1 -97 2 -98 2 -PREHOOK: query: select * - -from `src/_/cbo` - -where `src/_/cbo`.key in (select key from `src/_/cbo` s1 where s1.key > '9') order by key -PREHOOK: type: QUERY -PREHOOK: Input: default@src/_/cbo -#### A masked pattern was here #### -POSTHOOK: query: select * - -from `src/_/cbo` - -where `src/_/cbo`.key in (select key from `src/_/cbo` s1 where s1.key > '9') order by key -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src/_/cbo -#### A masked pattern was here #### -90 val_90 -90 val_90 -90 val_90 -92 val_92 -95 val_95 -95 val_95 -96 val_96 -97 val_97 -97 val_97 -98 val_98 -98 val_98 -PREHOOK: query: select * - -from `src/_/cbo` b - -where b.key in - - (select distinct a.key - - from `src/_/cbo` a - - where b.value = a.value and a.key > '9' - - ) order by b.key -PREHOOK: type: QUERY -PREHOOK: Input: default@src/_/cbo -#### A masked pattern was here #### -POSTHOOK: query: select * - -from `src/_/cbo` b - -where b.key in - - (select distinct a.key - - from `src/_/cbo` a - - where b.value = a.value and a.key > '9' - - ) order by b.key -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src/_/cbo -#### A masked pattern was here #### -90 val_90 -90 val_90 -90 val_90 -92 val_92 -95 val_95 -95 val_95 -96 val_96 -97 val_97 -97 val_97 -98 val_98 -98 val_98 -PREHOOK: query: select p.p_partkey, li.l_suppkey - -from (select distinct l_partkey as p_partkey from `line/item`) p join `line/item` li on p.p_partkey = li.l_partkey - -where li.l_linenumber = 1 and - - li.l_orderkey in (select l_orderkey from `line/item` where l_shipmode = 'AIR' and l_linenumber = li.l_linenumber) - - order by p.p_partkey -PREHOOK: type: QUERY -PREHOOK: Input: default@line/item -#### A masked pattern was here #### -POSTHOOK: query: select p.p_partkey, li.l_suppkey - -from (select distinct l_partkey as p_partkey from `line/item`) p join `line/item` li on p.p_partkey = li.l_partkey - -where li.l_linenumber = 1 and - - li.l_orderkey in (select l_orderkey from `line/item` where l_shipmode = 'AIR' and l_linenumber = li.l_linenumber) - - order by p.p_partkey -POSTHOOK: type: QUERY -POSTHOOK: Input: default@line/item -#### A masked pattern was here #### -108570 8571 -4297 1798 -PREHOOK: query: select key, value, count(*) - -from `src/_/cbo` b - -where b.key in (select key from `src/_/cbo` where `src/_/cbo`.key > '8') - -group by key, value - -having count(*) in (select count(*) from `src/_/cbo` s1 where s1.key > '9' group by s1.key ) order by key -PREHOOK: type: QUERY -PREHOOK: Input: default@src/_/cbo -#### A masked pattern was here #### -POSTHOOK: query: select key, value, count(*) - -from `src/_/cbo` b - -where b.key in (select key from `src/_/cbo` where `src/_/cbo`.key > '8') - -group by key, value - -having count(*) in (select count(*) from `src/_/cbo` s1 where s1.key > '9' group by s1.key ) order by key -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src/_/cbo -#### A masked pattern was here #### -80 val_80 1 -82 val_82 1 -83 val_83 2 -84 val_84 2 -85 val_85 1 -86 val_86 1 -87 val_87 1 -9 val_9 1 -90 val_90 3 -92 val_92 1 -95 val_95 2 -96 val_96 1 -97 val_97 2 -98 val_98 2 -PREHOOK: query: select p_mfgr, p_name, avg(p_size) - -from `p/a/r/t` - -group by p_mfgr, p_name - -having p_name in - - (select first_value(p_name) over(partition by p_mfgr order by 
p_size) from `p/a/r/t`) order by p_mfgr -PREHOOK: type: QUERY -PREHOOK: Input: default@p/a/r/t -#### A masked pattern was here #### -POSTHOOK: query: select p_mfgr, p_name, avg(p_size) - -from `p/a/r/t` - -group by p_mfgr, p_name - -having p_name in - - (select first_value(p_name) over(partition by p_mfgr order by p_size) from `p/a/r/t`) order by p_mfgr -POSTHOOK: type: QUERY -POSTHOOK: Input: default@p/a/r/t -#### A masked pattern was here #### -Manufacturer#1 almond antique burnished rose metallic 2.0 -Manufacturer#2 almond aquamarine midnight light salmon 2.0 -Manufacturer#3 almond antique misty red olive 1.0 -Manufacturer#4 almond aquamarine yellow dodger mint 7.0 -Manufacturer#5 almond antique sky peru orange 2.0 -PREHOOK: query: select * - -from `src/_/cbo` - -where `src/_/cbo`.key not in - - ( select key from `src/_/cbo` s1 - - where s1.key > '2' - - ) order by key -PREHOOK: type: QUERY -PREHOOK: Input: default@src/_/cbo -#### A masked pattern was here #### -POSTHOOK: query: select * - -from `src/_/cbo` - -where `src/_/cbo`.key not in - - ( select key from `src/_/cbo` s1 - - where s1.key > '2' - - ) order by key -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src/_/cbo -#### A masked pattern was here #### -0 val_0 -0 val_0 -0 val_0 -10 val_10 -100 val_100 -100 val_100 -103 val_103 -103 val_103 -104 val_104 -104 val_104 -105 val_105 -11 val_11 -111 val_111 -113 val_113 -113 val_113 -114 val_114 -116 val_116 -118 val_118 -118 val_118 -119 val_119 -119 val_119 -119 val_119 -12 val_12 -12 val_12 -120 val_120 -120 val_120 -125 val_125 -125 val_125 -126 val_126 -128 val_128 -128 val_128 -128 val_128 -129 val_129 -129 val_129 -131 val_131 -133 val_133 -134 val_134 -134 val_134 -136 val_136 -137 val_137 -137 val_137 -138 val_138 -138 val_138 -138 val_138 -138 val_138 -143 val_143 -145 val_145 -146 val_146 -146 val_146 -149 val_149 -149 val_149 -15 val_15 -15 val_15 -150 val_150 -152 val_152 -152 val_152 -153 val_153 -155 val_155 -156 val_156 -157 val_157 -158 val_158 -160 val_160 -162 val_162 -163 val_163 -164 val_164 -164 val_164 -165 val_165 -165 val_165 -166 val_166 -167 val_167 -167 val_167 -167 val_167 -168 val_168 -169 val_169 -169 val_169 -169 val_169 -169 val_169 -17 val_17 -170 val_170 -172 val_172 -172 val_172 -174 val_174 -174 val_174 -175 val_175 -175 val_175 -176 val_176 -176 val_176 -177 val_177 -178 val_178 -179 val_179 -179 val_179 -18 val_18 -18 val_18 -180 val_180 -181 val_181 -183 val_183 -186 val_186 -187 val_187 -187 val_187 -187 val_187 -189 val_189 -19 val_19 -190 val_190 -191 val_191 -191 val_191 -192 val_192 -193 val_193 -193 val_193 -193 val_193 -194 val_194 -195 val_195 -195 val_195 -196 val_196 -197 val_197 -197 val_197 -199 val_199 -199 val_199 -199 val_199 -2 val_2 -PREHOOK: query: select p_mfgr, b.p_name, p_size - -from `p/a/r/t` b - -where b.p_name not in - - (select p_name - - from (select p_mfgr, p_name, p_size as r from `p/a/r/t`) a - - where r < 10 and b.p_mfgr = a.p_mfgr - - ) order by p_mfgr,p_size -PREHOOK: type: QUERY -PREHOOK: Input: default@p/a/r/t -#### A masked pattern was here #### -POSTHOOK: query: select p_mfgr, b.p_name, p_size - -from `p/a/r/t` b - -where b.p_name not in - - (select p_name - - from (select p_mfgr, p_name, p_size as r from `p/a/r/t`) a - - where r < 10 and b.p_mfgr = a.p_mfgr - - ) order by p_mfgr,p_size -POSTHOOK: type: QUERY -POSTHOOK: Input: default@p/a/r/t -#### A masked pattern was here #### -Manufacturer#1 almond antique chartreuse lavender yellow 34 -Manufacturer#1 almond aquamarine burnished black steel 28 
-Manufacturer#1 almond aquamarine pink moccasin thistle 42 -Manufacturer#2 almond antique violet chocolate turquoise 14 -Manufacturer#2 almond antique violet turquoise frosted 40 -Manufacturer#2 almond aquamarine rose maroon antique 25 -Manufacturer#2 almond aquamarine sandy cyan gainsboro 18 -Manufacturer#3 almond antique chartreuse khaki white 17 -Manufacturer#3 almond antique forest lavender goldenrod 14 -Manufacturer#3 almond antique metallic orange dim 19 -Manufacturer#3 almond antique olive coral navajo 45 -Manufacturer#4 almond antique gainsboro frosted violet 10 -Manufacturer#4 almond antique violet mint lemon 39 -Manufacturer#4 almond aquamarine floral ivory bisque 27 -Manufacturer#4 almond azure aquamarine papaya violet 12 -Manufacturer#5 almond antique blue firebrick mint 31 -Manufacturer#5 almond aquamarine dodger light gainsboro 46 -Manufacturer#5 almond azure blanched chiffon midnight 23 -PREHOOK: query: select p_name, p_size - -from - -`p/a/r/t` where `p/a/r/t`.p_size not in - - (select avg(p_size) - - from (select p_size from `p/a/r/t`) a - - where p_size < 10 - - ) order by p_name -PREHOOK: type: QUERY -PREHOOK: Input: default@p/a/r/t -#### A masked pattern was here #### -POSTHOOK: query: select p_name, p_size - -from - -`p/a/r/t` where `p/a/r/t`.p_size not in - - (select avg(p_size) - - from (select p_size from `p/a/r/t`) a - - where p_size < 10 - - ) order by p_name -POSTHOOK: type: QUERY -POSTHOOK: Input: default@p/a/r/t -#### A masked pattern was here #### -almond antique blue firebrick mint 31 -almond antique burnished rose metallic 2 -almond antique burnished rose metallic 2 -almond antique chartreuse khaki white 17 -almond antique chartreuse lavender yellow 34 -almond antique forest lavender goldenrod 14 -almond antique gainsboro frosted violet 10 -almond antique medium spring khaki 6 -almond antique metallic orange dim 19 -almond antique misty red olive 1 -almond antique olive coral navajo 45 -almond antique salmon chartreuse burlywood 6 -almond antique sky peru orange 2 -almond antique violet chocolate turquoise 14 -almond antique violet mint lemon 39 -almond antique violet turquoise frosted 40 -almond aquamarine burnished black steel 28 -almond aquamarine dodger light gainsboro 46 -almond aquamarine floral ivory bisque 27 -almond aquamarine midnight light salmon 2 -almond aquamarine pink moccasin thistle 42 -almond aquamarine rose maroon antique 25 -almond aquamarine sandy cyan gainsboro 18 -almond aquamarine yellow dodger mint 7 -almond azure aquamarine papaya violet 12 -almond azure blanched chiffon midnight 23 -PREHOOK: query: select p_mfgr, p_name, p_size - -from `p/a/r/t` b where b.p_size not in - - (select min(p_size) - - from (select p_mfgr, p_size from `p/a/r/t`) a - - where p_size < 10 and b.p_mfgr = a.p_mfgr - - ) order by p_name -PREHOOK: type: QUERY -PREHOOK: Input: default@p/a/r/t -#### A masked pattern was here #### -POSTHOOK: query: select p_mfgr, p_name, p_size - -from `p/a/r/t` b where b.p_size not in - - (select min(p_size) - - from (select p_mfgr, p_size from `p/a/r/t`) a - - where p_size < 10 and b.p_mfgr = a.p_mfgr - - ) order by p_name -POSTHOOK: type: QUERY -POSTHOOK: Input: default@p/a/r/t -#### A masked pattern was here #### -Manufacturer#1 almond antique chartreuse lavender yellow 34 -Manufacturer#1 almond antique salmon chartreuse burlywood 6 -Manufacturer#1 almond aquamarine burnished black steel 28 -Manufacturer#1 almond aquamarine pink moccasin thistle 42 -Manufacturer#2 almond antique violet chocolate turquoise 14 -Manufacturer#2 
almond antique violet turquoise frosted 40 -Manufacturer#2 almond aquamarine rose maroon antique 25 -Manufacturer#2 almond aquamarine sandy cyan gainsboro 18 -Manufacturer#3 almond antique chartreuse khaki white 17 -Manufacturer#3 almond antique forest lavender goldenrod 14 -Manufacturer#3 almond antique metallic orange dim 19 -Manufacturer#3 almond antique olive coral navajo 45 -Manufacturer#4 almond antique gainsboro frosted violet 10 -Manufacturer#4 almond antique violet mint lemon 39 -Manufacturer#4 almond aquamarine floral ivory bisque 27 -Manufacturer#4 almond azure aquamarine papaya violet 12 -Manufacturer#5 almond antique blue firebrick mint 31 -Manufacturer#5 almond antique medium spring khaki 6 -Manufacturer#5 almond aquamarine dodger light gainsboro 46 -Manufacturer#5 almond azure blanched chiffon midnight 23 -PREHOOK: query: select li.l_partkey, count(*) - -from `line/item` li - -where li.l_linenumber = 1 and - - li.l_orderkey not in (select l_orderkey from `line/item` where l_shipmode = 'AIR') - -group by li.l_partkey order by li.l_partkey -PREHOOK: type: QUERY -PREHOOK: Input: default@line/item -#### A masked pattern was here #### -POSTHOOK: query: select li.l_partkey, count(*) - -from `line/item` li - -where li.l_linenumber = 1 and - - li.l_orderkey not in (select l_orderkey from `line/item` where l_shipmode = 'AIR') - -group by li.l_partkey order by li.l_partkey -POSTHOOK: type: QUERY -POSTHOOK: Input: default@line/item -#### A masked pattern was here #### -106170 1 -119477 1 -119767 1 -123076 1 -139636 1 -175839 1 -182052 1 -21636 1 -22630 1 -450 1 -59694 1 -61931 1 -7068 1 -85951 1 -88035 1 -88362 1 -PREHOOK: query: select b.p_mfgr, min(p_retailprice) - -from `p/a/r/t` b - -group by b.p_mfgr - -having b.p_mfgr not in - - (select p_mfgr - - from (select p_mfgr, min(p_retailprice) l, max(p_retailprice) r, avg(p_retailprice) a from `p/a/r/t` group by p_mfgr) a - - where min(p_retailprice) = l and r - l > 600 - - ) - - order by b.p_mfgr -PREHOOK: type: QUERY -PREHOOK: Input: default@p/a/r/t -#### A masked pattern was here #### -POSTHOOK: query: select b.p_mfgr, min(p_retailprice) - -from `p/a/r/t` b - -group by b.p_mfgr - -having b.p_mfgr not in - - (select p_mfgr - - from (select p_mfgr, min(p_retailprice) l, max(p_retailprice) r, avg(p_retailprice) a from `p/a/r/t` group by p_mfgr) a - - where min(p_retailprice) = l and r - l > 600 - - ) - - order by b.p_mfgr -POSTHOOK: type: QUERY -POSTHOOK: Input: default@p/a/r/t -#### A masked pattern was here #### -Manufacturer#1 1173.15 -Manufacturer#2 1690.68 -PREHOOK: query: select b.p_mfgr, min(p_retailprice) - -from `p/a/r/t` b - -group by b.p_mfgr - -having b.p_mfgr not in - - (select p_mfgr - - from `p/a/r/t` a - - group by p_mfgr - - having max(p_retailprice) - min(p_retailprice) > 600 - - ) - - order by b.p_mfgr -PREHOOK: type: QUERY -PREHOOK: Input: default@p/a/r/t -#### A masked pattern was here #### -POSTHOOK: query: select b.p_mfgr, min(p_retailprice) - -from `p/a/r/t` b - -group by b.p_mfgr - -having b.p_mfgr not in - - (select p_mfgr - - from `p/a/r/t` a - - group by p_mfgr - - having max(p_retailprice) - min(p_retailprice) > 600 - - ) - - order by b.p_mfgr -POSTHOOK: type: QUERY -POSTHOOK: Input: default@p/a/r/t -#### A masked pattern was here #### -Manufacturer#1 1173.15 -Manufacturer#2 1690.68 -PREHOOK: query: select count(*), count(c_int), sum(c_int), avg(c_int), max(c_int), min(c_int) from `c/b/o_t1` -PREHOOK: type: QUERY -PREHOOK: Input: default@c/b/o_t1 -PREHOOK: Input: default@c/b/o_t1@dt=2014 -#### A masked 
pattern was here #### -POSTHOOK: query: select count(*), count(c_int), sum(c_int), avg(c_int), max(c_int), min(c_int) from `c/b/o_t1` -POSTHOOK: type: QUERY -POSTHOOK: Input: default@c/b/o_t1 -POSTHOOK: Input: default@c/b/o_t1@dt=2014 -#### A masked pattern was here #### -20 18 18 1.0 1 1 -PREHOOK: query: select count(*), count(c_int) as a, sum(c_int), avg(c_int), max(c_int), min(c_int), case c_int when 0 then 1 when 1 then 2 else 3 end, sum(case c_int when 0 then 1 when 1 then 2 else 3 end) from `c/b/o_t1` group by c_int order by a -PREHOOK: type: QUERY -PREHOOK: Input: default@c/b/o_t1 -PREHOOK: Input: default@c/b/o_t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: select count(*), count(c_int) as a, sum(c_int), avg(c_int), max(c_int), min(c_int), case c_int when 0 then 1 when 1 then 2 else 3 end, sum(case c_int when 0 then 1 when 1 then 2 else 3 end) from `c/b/o_t1` group by c_int order by a -POSTHOOK: type: QUERY -POSTHOOK: Input: default@c/b/o_t1 -POSTHOOK: Input: default@c/b/o_t1@dt=2014 -#### A masked pattern was here #### -18 18 18 1.0 1 1 2 36 -2 0 NULL NULL NULL NULL 3 6 -PREHOOK: query: select * from (select count(*) as a, count(distinct c_int) as b, sum(c_int) as c, avg(c_int) as d, max(c_int) as e, min(c_int) as f from `c/b/o_t1`) `c/b/o_t1` -PREHOOK: type: QUERY -PREHOOK: Input: default@c/b/o_t1 -PREHOOK: Input: default@c/b/o_t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: select * from (select count(*) as a, count(distinct c_int) as b, sum(c_int) as c, avg(c_int) as d, max(c_int) as e, min(c_int) as f from `c/b/o_t1`) `c/b/o_t1` -POSTHOOK: type: QUERY -POSTHOOK: Input: default@c/b/o_t1 -POSTHOOK: Input: default@c/b/o_t1@dt=2014 -#### A masked pattern was here #### -20 1 18 1.0 1 1 -PREHOOK: query: select * from (select count(*) as a, count(distinct c_int) as b, sum(c_int) as c, avg(c_int) as d, max(c_int) as e, min(c_int) as f, case c_int when 0 then 1 when 1 then 2 else 3 end as g, sum(case c_int when 0 then 1 when 1 then 2 else 3 end) as h from `c/b/o_t1` group by c_int) `c/b/o_t1` order by a -PREHOOK: type: QUERY -PREHOOK: Input: default@c/b/o_t1 -PREHOOK: Input: default@c/b/o_t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: select * from (select count(*) as a, count(distinct c_int) as b, sum(c_int) as c, avg(c_int) as d, max(c_int) as e, min(c_int) as f, case c_int when 0 then 1 when 1 then 2 else 3 end as g, sum(case c_int when 0 then 1 when 1 then 2 else 3 end) as h from `c/b/o_t1` group by c_int) `c/b/o_t1` order by a -POSTHOOK: type: QUERY -POSTHOOK: Input: default@c/b/o_t1 -POSTHOOK: Input: default@c/b/o_t1@dt=2014 -#### A masked pattern was here #### -18 1 18 1.0 1 1 2 36 -2 0 NULL NULL NULL NULL 3 6 -PREHOOK: query: select f,a,e,b from (select count(*) as a, count(c_int) as b, sum(c_int) as c, avg(c_int) as d, max(c_int) as e, min(c_int) as f from `c/b/o_t1`) `c/b/o_t1` -PREHOOK: type: QUERY -PREHOOK: Input: default@c/b/o_t1 -PREHOOK: Input: default@c/b/o_t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: select f,a,e,b from (select count(*) as a, count(c_int) as b, sum(c_int) as c, avg(c_int) as d, max(c_int) as e, min(c_int) as f from `c/b/o_t1`) `c/b/o_t1` -POSTHOOK: type: QUERY -POSTHOOK: Input: default@c/b/o_t1 -POSTHOOK: Input: default@c/b/o_t1@dt=2014 -#### A masked pattern was here #### -1 20 1 18 -PREHOOK: query: select f,a,e,b from (select count(*) as a, count(distinct c_int) as b, sum(distinct c_int) as c, avg(distinct c_int) as d, max(distinct c_int) as e, min(distinct c_int) as f from 
`c/b/o_t1`) `c/b/o_t1` -PREHOOK: type: QUERY -PREHOOK: Input: default@c/b/o_t1 -PREHOOK: Input: default@c/b/o_t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: select f,a,e,b from (select count(*) as a, count(distinct c_int) as b, sum(distinct c_int) as c, avg(distinct c_int) as d, max(distinct c_int) as e, min(distinct c_int) as f from `c/b/o_t1`) `c/b/o_t1` -POSTHOOK: type: QUERY -POSTHOOK: Input: default@c/b/o_t1 -POSTHOOK: Input: default@c/b/o_t1@dt=2014 -#### A masked pattern was here #### -1 20 1 1 -PREHOOK: query: select key,count(c_int) as a, avg(c_float) from `c/b/o_t1` group by key order by a -PREHOOK: type: QUERY -PREHOOK: Input: default@c/b/o_t1 -PREHOOK: Input: default@c/b/o_t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: select key,count(c_int) as a, avg(c_float) from `c/b/o_t1` group by key order by a -POSTHOOK: type: QUERY -POSTHOOK: Input: default@c/b/o_t1 -POSTHOOK: Input: default@c/b/o_t1@dt=2014 -#### A masked pattern was here #### - 1 2 1.0 - 1 2 1.0 -1 12 1.0 -1 2 1.0 -NULL 0 NULL -PREHOOK: query: select count(distinct c_int) as a, avg(c_float) from `c/b/o_t1` group by c_float order by a -PREHOOK: type: QUERY -PREHOOK: Input: default@c/b/o_t1 -PREHOOK: Input: default@c/b/o_t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: select count(distinct c_int) as a, avg(c_float) from `c/b/o_t1` group by c_float order by a -POSTHOOK: type: QUERY -POSTHOOK: Input: default@c/b/o_t1 -POSTHOOK: Input: default@c/b/o_t1@dt=2014 -#### A masked pattern was here #### -0 NULL -1 1.0 -PREHOOK: query: select count(distinct c_int) as a, avg(c_float) from `c/b/o_t1` group by c_int order by a -PREHOOK: type: QUERY -PREHOOK: Input: default@c/b/o_t1 -PREHOOK: Input: default@c/b/o_t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: select count(distinct c_int) as a, avg(c_float) from `c/b/o_t1` group by c_int order by a -POSTHOOK: type: QUERY -POSTHOOK: Input: default@c/b/o_t1 -POSTHOOK: Input: default@c/b/o_t1@dt=2014 -#### A masked pattern was here #### -0 NULL -1 1.0 -PREHOOK: query: select count(distinct c_int) as a, avg(c_float) from `c/b/o_t1` group by c_float, c_int order by a -PREHOOK: type: QUERY -PREHOOK: Input: default@c/b/o_t1 -PREHOOK: Input: default@c/b/o_t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: select count(distinct c_int) as a, avg(c_float) from `c/b/o_t1` group by c_float, c_int order by a -POSTHOOK: type: QUERY -POSTHOOK: Input: default@c/b/o_t1 -POSTHOOK: Input: default@c/b/o_t1@dt=2014 -#### A masked pattern was here #### -0 NULL -1 1.0 -PREHOOK: query: select * from (select * from `c/b/o_t1` order by key, c_boolean, value, dt)a union all select * from (select * from `//cbo_t2` order by key, c_boolean, value, dt)b -PREHOOK: type: QUERY -PREHOOK: Input: default@//cbo_t2 -PREHOOK: Input: default@//cbo_t2@dt=2014 -PREHOOK: Input: default@c/b/o_t1 -PREHOOK: Input: default@c/b/o_t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: select * from (select * from `c/b/o_t1` order by key, c_boolean, value, dt)a union all select * from (select * from `//cbo_t2` order by key, c_boolean, value, dt)b -POSTHOOK: type: QUERY -POSTHOOK: Input: default@//cbo_t2 -POSTHOOK: Input: default@//cbo_t2@dt=2014 -POSTHOOK: Input: default@c/b/o_t1 -POSTHOOK: Input: default@c/b/o_t1@dt=2014 -#### A masked pattern was here #### - 1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 -1 1 
1 1.0 false 2014 -1 1 1 1.0 false 2014 -1 1 1 1.0 false 2014 -1 1 1 1.0 false 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -2 2 2 2.0 true 2014 -2 2 2 2.0 true 2014 -2 2 2 2.0 true 2014 -2 2 2 2.0 true 2014 -2 2 2 2.0 true 2014 -NULL NULL NULL NULL NULL 2014 -NULL NULL NULL NULL NULL 2014 -NULL NULL NULL NULL NULL 2014 -NULL NULL NULL NULL NULL 2014 -PREHOOK: query: select key from (select key, c_int from (select * from `c/b/o_t1` union all select * from `//cbo_t2` where `//cbo_t2`.key >=0)r1 union all select key, c_int from `cbo_/t3////`)r2 where key >=0 order by key -PREHOOK: type: QUERY -PREHOOK: Input: default@//cbo_t2 -PREHOOK: Input: default@//cbo_t2@dt=2014 -PREHOOK: Input: default@c/b/o_t1 -PREHOOK: Input: default@c/b/o_t1@dt=2014 -PREHOOK: Input: default@cbo_/t3//// -#### A masked pattern was here #### -POSTHOOK: query: select key from (select key, c_int from (select * from `c/b/o_t1` union all select * from `//cbo_t2` where `//cbo_t2`.key >=0)r1 union all select key, c_int from `cbo_/t3////`)r2 where key >=0 order by key -POSTHOOK: type: QUERY -POSTHOOK: Input: default@//cbo_t2 -POSTHOOK: Input: default@//cbo_t2@dt=2014 -POSTHOOK: Input: default@c/b/o_t1 -POSTHOOK: Input: default@c/b/o_t1@dt=2014 -POSTHOOK: Input: default@cbo_/t3//// -#### A masked pattern was here #### - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -2 -2 -2 -2 -2 -2 -2 -2 -3 -3 -3 -PREHOOK: query: select r2.key from (select key, c_int from (select key, c_int from `c/b/o_t1` union all select key, c_int from `cbo_/t3////` )r1 union all select key, c_int from `cbo_/t3////`)r2 join (select key, c_int from (select * from `c/b/o_t1` union all select * from `//cbo_t2` where `//cbo_t2`.key >=0)r1 union all select key, c_int from `cbo_/t3////`)r3 on r2.key=r3.key where r3.key >=0 order by r2.key -PREHOOK: type: QUERY -PREHOOK: Input: default@//cbo_t2 -PREHOOK: Input: default@//cbo_t2@dt=2014 -PREHOOK: Input: default@c/b/o_t1 -PREHOOK: Input: default@c/b/o_t1@dt=2014 -PREHOOK: Input: default@cbo_/t3//// -#### A masked pattern was here #### -POSTHOOK: query: select r2.key from (select key, c_int from (select key, c_int from `c/b/o_t1` union all select key, c_int from `cbo_/t3////` )r1 union all select key, c_int from `cbo_/t3////`)r2 join (select key, c_int from (select * from `c/b/o_t1` union all select * from `//cbo_t2` where `//cbo_t2`.key >=0)r1 union all select key, c_int from `cbo_/t3////`)r3 on r2.key=r3.key where r3.key >=0 order by r2.key -POSTHOOK: type: QUERY -POSTHOOK: Input: default@//cbo_t2 -POSTHOOK: Input: default@//cbo_t2@dt=2014 -POSTHOOK: Input: default@c/b/o_t1 -POSTHOOK: Input: default@c/b/o_t1@dt=2014 -POSTHOOK: Input: default@cbo_/t3//// -#### A masked pattern was here #### - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 
-1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -PREHOOK: query: create view v1 as select c_int, value, c_boolean, dt from `c/b/o_t1` -PREHOOK: type: CREATEVIEW -PREHOOK: Input: default@c/b/o_t1 -PREHOOK: Output: database:default -PREHOOK: Output: default@v1 -POSTHOOK: query: create view v1 as select c_int, value, c_boolean, dt from `c/b/o_t1` -POSTHOOK: type: CREATEVIEW -POSTHOOK: Input: default@c/b/o_t1 -POSTHOOK: Output: database:default -POSTHOOK: Output: default@v1 -POSTHOOK: Lineage: v1.c_boolean SIMPLE [(c/b/o_t1)c/b/o_t1.FieldSchema(name:c_boolean, type:boolean, comment:null), ] -POSTHOOK: Lineage: v1.c_int SIMPLE [(c/b/o_t1)c/b/o_t1.FieldSchema(name:c_int, type:int, comment:null), ] -POSTHOOK: Lineage: v1.dt SIMPLE [(c/b/o_t1)c/b/o_t1.FieldSchema(name:dt, type:string, comment:null), ] -POSTHOOK: Lineage: v1.value SIMPLE [(c/b/o_t1)c/b/o_t1.FieldSchema(name:value, type:string, comment:null), ] -PREHOOK: query: create view v2 as select c_int, value from `//cbo_t2` -PREHOOK: type: CREATEVIEW -PREHOOK: Input: default@//cbo_t2 -PREHOOK: Output: database:default -PREHOOK: Output: default@v2 -POSTHOOK: query: create view v2 as select c_int, value from `//cbo_t2` -POSTHOOK: type: CREATEVIEW -POSTHOOK: Input: default@//cbo_t2 -POSTHOOK: Output: database:default -POSTHOOK: Output: default@v2 -POSTHOOK: Lineage: v2.c_int SIMPLE [(//cbo_t2)//cbo_t2.FieldSchema(name:c_int, type:int, comment:null), ] -POSTHOOK: Lineage: v2.value SIMPLE [(//cbo_t2)//cbo_t2.FieldSchema(name:value, type:string, comment:null), ] -PREHOOK: query: select value from v1 where c_boolean=false -PREHOOK: 
type: QUERY -PREHOOK: Input: default@c/b/o_t1 -PREHOOK: Input: default@c/b/o_t1@dt=2014 -PREHOOK: Input: default@v1 -#### A masked pattern was here #### -POSTHOOK: query: select value from v1 where c_boolean=false -POSTHOOK: type: QUERY -POSTHOOK: Input: default@c/b/o_t1 -POSTHOOK: Input: default@c/b/o_t1@dt=2014 -POSTHOOK: Input: default@v1 -#### A masked pattern was here #### -1 -1 -PREHOOK: query: select max(c_int) from v1 group by (c_boolean) -PREHOOK: type: QUERY -PREHOOK: Input: default@c/b/o_t1 -PREHOOK: Input: default@c/b/o_t1@dt=2014 -PREHOOK: Input: default@v1 -#### A masked pattern was here #### -POSTHOOK: query: select max(c_int) from v1 group by (c_boolean) -POSTHOOK: type: QUERY -POSTHOOK: Input: default@c/b/o_t1 -POSTHOOK: Input: default@c/b/o_t1@dt=2014 -POSTHOOK: Input: default@v1 -#### A masked pattern was here #### -1 -1 -NULL -PREHOOK: query: select count(v1.c_int) from v1 join `//cbo_t2` on v1.c_int = `//cbo_t2`.c_int -PREHOOK: type: QUERY -PREHOOK: Input: default@//cbo_t2 -PREHOOK: Input: default@//cbo_t2@dt=2014 -PREHOOK: Input: default@c/b/o_t1 -PREHOOK: Input: default@c/b/o_t1@dt=2014 -PREHOOK: Input: default@v1 -#### A masked pattern was here #### -POSTHOOK: query: select count(v1.c_int) from v1 join `//cbo_t2` on v1.c_int = `//cbo_t2`.c_int -POSTHOOK: type: QUERY -POSTHOOK: Input: default@//cbo_t2 -POSTHOOK: Input: default@//cbo_t2@dt=2014 -POSTHOOK: Input: default@c/b/o_t1 -POSTHOOK: Input: default@c/b/o_t1@dt=2014 -POSTHOOK: Input: default@v1 -#### A masked pattern was here #### -234 -PREHOOK: query: select count(v1.c_int) from v1 join v2 on v1.c_int = v2.c_int -PREHOOK: type: QUERY -PREHOOK: Input: default@//cbo_t2 -PREHOOK: Input: default@//cbo_t2@dt=2014 -PREHOOK: Input: default@c/b/o_t1 -PREHOOK: Input: default@c/b/o_t1@dt=2014 -PREHOOK: Input: default@v1 -PREHOOK: Input: default@v2 -#### A masked pattern was here #### -POSTHOOK: query: select count(v1.c_int) from v1 join v2 on v1.c_int = v2.c_int -POSTHOOK: type: QUERY -POSTHOOK: Input: default@//cbo_t2 -POSTHOOK: Input: default@//cbo_t2@dt=2014 -POSTHOOK: Input: default@c/b/o_t1 -POSTHOOK: Input: default@c/b/o_t1@dt=2014 -POSTHOOK: Input: default@v1 -POSTHOOK: Input: default@v2 -#### A masked pattern was here #### -234 -PREHOOK: query: select count(*) from v1 a join v1 b on a.value = b.value -PREHOOK: type: QUERY -PREHOOK: Input: default@c/b/o_t1 -PREHOOK: Input: default@c/b/o_t1@dt=2014 -PREHOOK: Input: default@v1 -#### A masked pattern was here #### -POSTHOOK: query: select count(*) from v1 a join v1 b on a.value = b.value -POSTHOOK: type: QUERY -POSTHOOK: Input: default@c/b/o_t1 -POSTHOOK: Input: default@c/b/o_t1@dt=2014 -POSTHOOK: Input: default@v1 -#### A masked pattern was here #### -156 -PREHOOK: query: create view v3 as select v1.value val from v1 join `c/b/o_t1` on v1.c_boolean = `c/b/o_t1`.c_boolean -PREHOOK: type: CREATEVIEW -PREHOOK: Input: default@c/b/o_t1 -PREHOOK: Input: default@v1 -PREHOOK: Output: database:default -PREHOOK: Output: default@v3 -POSTHOOK: query: create view v3 as select v1.value val from v1 join `c/b/o_t1` on v1.c_boolean = `c/b/o_t1`.c_boolean -POSTHOOK: type: CREATEVIEW -POSTHOOK: Input: default@c/b/o_t1 -POSTHOOK: Input: default@v1 -POSTHOOK: Output: database:default -POSTHOOK: Output: default@v3 -POSTHOOK: Lineage: v3.val SIMPLE [(c/b/o_t1)c/b/o_t1.FieldSchema(name:value, type:string, comment:null), ] -PREHOOK: query: select count(val) from v3 where val != '1' -PREHOOK: type: QUERY -PREHOOK: Input: default@c/b/o_t1 -PREHOOK: Input: default@c/b/o_t1@dt=2014 -PREHOOK: 
Input: default@v1 -PREHOOK: Input: default@v3 -#### A masked pattern was here #### -POSTHOOK: query: select count(val) from v3 where val != '1' -POSTHOOK: type: QUERY -POSTHOOK: Input: default@c/b/o_t1 -POSTHOOK: Input: default@c/b/o_t1@dt=2014 -POSTHOOK: Input: default@v1 -POSTHOOK: Input: default@v3 -#### A masked pattern was here #### -96 -PREHOOK: query: with q1 as ( select key from `c/b/o_t1` where key = '1') - -select count(*) from q1 -PREHOOK: type: QUERY -PREHOOK: Input: default@c/b/o_t1 -PREHOOK: Input: default@c/b/o_t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: with q1 as ( select key from `c/b/o_t1` where key = '1') - -select count(*) from q1 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@c/b/o_t1 -POSTHOOK: Input: default@c/b/o_t1@dt=2014 -#### A masked pattern was here #### -12 -PREHOOK: query: with q1 as ( select value from v1 where c_boolean = false) - -select count(value) from q1 -PREHOOK: type: QUERY -PREHOOK: Input: default@c/b/o_t1 -PREHOOK: Input: default@c/b/o_t1@dt=2014 -PREHOOK: Input: default@v1 -#### A masked pattern was here #### -POSTHOOK: query: with q1 as ( select value from v1 where c_boolean = false) - -select count(value) from q1 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@c/b/o_t1 -POSTHOOK: Input: default@c/b/o_t1@dt=2014 -POSTHOOK: Input: default@v1 -#### A masked pattern was here #### -2 -PREHOOK: query: create view v4 as - -with q1 as ( select key,c_int from `c/b/o_t1` where key = '1') - -select * from q1 -PREHOOK: type: CREATEVIEW -PREHOOK: Input: default@c/b/o_t1 -PREHOOK: Output: database:default -PREHOOK: Output: default@v4 -POSTHOOK: query: create view v4 as - -with q1 as ( select key,c_int from `c/b/o_t1` where key = '1') - -select * from q1 -POSTHOOK: type: CREATEVIEW -POSTHOOK: Input: default@c/b/o_t1 -POSTHOOK: Output: database:default -POSTHOOK: Output: default@v4 -POSTHOOK: Lineage: v4.c_int SIMPLE [(c/b/o_t1)c/b/o_t1.FieldSchema(name:c_int, type:int, comment:null), ] -POSTHOOK: Lineage: v4.key SIMPLE [(c/b/o_t1)c/b/o_t1.FieldSchema(name:key, type:string, comment:null), ] -PREHOOK: query: with q1 as ( select c_int from q2 where c_boolean = false), - -q2 as ( select c_int,c_boolean from v1 where value = '1') - -select sum(c_int) from (select c_int from q1) a -PREHOOK: type: QUERY -PREHOOK: Input: default@c/b/o_t1 -PREHOOK: Input: default@c/b/o_t1@dt=2014 -PREHOOK: Input: default@v1 -#### A masked pattern was here #### -POSTHOOK: query: with q1 as ( select c_int from q2 where c_boolean = false), - -q2 as ( select c_int,c_boolean from v1 where value = '1') - -select sum(c_int) from (select c_int from q1) a -POSTHOOK: type: QUERY -POSTHOOK: Input: default@c/b/o_t1 -POSTHOOK: Input: default@c/b/o_t1@dt=2014 -POSTHOOK: Input: default@v1 -#### A masked pattern was here #### -2 -PREHOOK: query: with q1 as ( select `c/b/o_t1`.c_int c_int from q2 join `c/b/o_t1` where q2.c_int = `c/b/o_t1`.c_int and `c/b/o_t1`.dt='2014'), - -q2 as ( select c_int,c_boolean from v1 where value = '1' or dt = '14') - -select count(*) from q1 join q2 join v4 on q1.c_int = q2.c_int and v4.c_int = q2.c_int -PREHOOK: type: QUERY -PREHOOK: Input: default@c/b/o_t1 -PREHOOK: Input: default@c/b/o_t1@dt=2014 -PREHOOK: Input: default@v1 -PREHOOK: Input: default@v4 -#### A masked pattern was here #### -POSTHOOK: query: with q1 as ( select `c/b/o_t1`.c_int c_int from q2 join `c/b/o_t1` where q2.c_int = `c/b/o_t1`.c_int and `c/b/o_t1`.dt='2014'), - -q2 as ( select c_int,c_boolean from v1 where value = '1' or dt = '14') - -select count(*) from q1 join q2 join 
v4 on q1.c_int = q2.c_int and v4.c_int = q2.c_int -POSTHOOK: type: QUERY -POSTHOOK: Input: default@c/b/o_t1 -POSTHOOK: Input: default@c/b/o_t1@dt=2014 -POSTHOOK: Input: default@v1 -POSTHOOK: Input: default@v4 -#### A masked pattern was here #### -31104 -PREHOOK: query: drop view v1 -PREHOOK: type: DROPVIEW -PREHOOK: Input: default@v1 -PREHOOK: Output: default@v1 -POSTHOOK: query: drop view v1 -POSTHOOK: type: DROPVIEW -POSTHOOK: Input: default@v1 -POSTHOOK: Output: default@v1 -PREHOOK: query: drop view v2 -PREHOOK: type: DROPVIEW -PREHOOK: Input: default@v2 -PREHOOK: Output: default@v2 -POSTHOOK: query: drop view v2 -POSTHOOK: type: DROPVIEW -POSTHOOK: Input: default@v2 -POSTHOOK: Output: default@v2 -PREHOOK: query: drop view v3 -PREHOOK: type: DROPVIEW -PREHOOK: Input: default@v3 -PREHOOK: Output: default@v3 -POSTHOOK: query: drop view v3 -POSTHOOK: type: DROPVIEW -POSTHOOK: Input: default@v3 -POSTHOOK: Output: default@v3 -PREHOOK: query: drop view v4 -PREHOOK: type: DROPVIEW -PREHOOK: Input: default@v4 -PREHOOK: Output: default@v4 -POSTHOOK: query: drop view v4 -POSTHOOK: type: DROPVIEW -POSTHOOK: Input: default@v4 -POSTHOOK: Output: default@v4 -PREHOOK: query: select count(c_int) over() from `c/b/o_t1` -PREHOOK: type: QUERY -PREHOOK: Input: default@c/b/o_t1 -PREHOOK: Input: default@c/b/o_t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: select count(c_int) over() from `c/b/o_t1` -POSTHOOK: type: QUERY -POSTHOOK: Input: default@c/b/o_t1 -POSTHOOK: Input: default@c/b/o_t1@dt=2014 -#### A masked pattern was here #### -18 -18 -18 -18 -18 -18 -18 -18 -18 -18 -18 -18 -18 -18 -18 -18 -18 -18 -18 -18 -PREHOOK: query: select count(c_int) over(partition by c_float order by key), sum(c_float) over(partition by c_float order by key), max(c_int) over(partition by c_float order by key), min(c_int) over(partition by c_float order by key), row_number() over(partition by c_float order by key) as rn, rank() over(partition by c_float order by key), dense_rank() over(partition by c_float order by key), round(percent_rank() over(partition by c_float order by key), 2), lead(c_int, 2, c_int) over(partition by c_float order by key), lag(c_float, 2, c_float) over(partition by c_float order by key) from `c/b/o_t1` order by rn -PREHOOK: type: QUERY -PREHOOK: Input: default@c/b/o_t1 -PREHOOK: Input: default@c/b/o_t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: select count(c_int) over(partition by c_float order by key), sum(c_float) over(partition by c_float order by key), max(c_int) over(partition by c_float order by key), min(c_int) over(partition by c_float order by key), row_number() over(partition by c_float order by key) as rn, rank() over(partition by c_float order by key), dense_rank() over(partition by c_float order by key), round(percent_rank() over(partition by c_float order by key), 2), lead(c_int, 2, c_int) over(partition by c_float order by key), lag(c_float, 2, c_float) over(partition by c_float order by key) from `c/b/o_t1` order by rn -POSTHOOK: type: QUERY -POSTHOOK: Input: default@c/b/o_t1 -POSTHOOK: Input: default@c/b/o_t1@dt=2014 -#### A masked pattern was here #### -0 NULL NULL NULL 1 1 1 0.0 NULL NULL -0 NULL NULL NULL 2 1 1 0.0 NULL NULL -16 16.0 1 1 10 5 3 0.24 1 1.0 -16 16.0 1 1 11 5 3 0.24 1 1.0 -16 16.0 1 1 12 5 3 0.24 1 1.0 -16 16.0 1 1 13 5 3 0.24 1 1.0 -16 16.0 1 1 14 5 3 0.24 1 1.0 -16 16.0 1 1 15 5 3 0.24 1 1.0 -16 16.0 1 1 16 5 3 0.24 1 1.0 -16 16.0 1 1 5 5 3 0.24 1 1.0 -16 16.0 1 1 6 5 3 0.24 1 1.0 -16 16.0 1 1 7 5 3 0.24 1 1.0 -16 16.0 1 1 8 5 3 
0.24 1 1.0 -16 16.0 1 1 9 5 3 0.24 1 1.0 -18 18.0 1 1 17 17 4 0.94 1 1.0 -18 18.0 1 1 18 17 4 0.94 1 1.0 -2 2.0 1 1 1 1 1 0.0 1 1.0 -2 2.0 1 1 2 1 1 0.0 1 1.0 -4 4.0 1 1 3 3 2 0.12 1 1.0 -4 4.0 1 1 4 3 2 0.12 1 1.0 -PREHOOK: query: select * from (select count(c_int) over(partition by c_float order by key), sum(c_float) over(partition by c_float order by key), max(c_int) over(partition by c_float order by key), min(c_int) over(partition by c_float order by key), row_number() over(partition by c_float order by key) as rn, rank() over(partition by c_float order by key), dense_rank() over(partition by c_float order by key), round(percent_rank() over(partition by c_float order by key),2), lead(c_int, 2, c_int) over(partition by c_float order by key ), lag(c_float, 2, c_float) over(partition by c_float order by key) from `c/b/o_t1` order by rn) `c/b/o_t1` -PREHOOK: type: QUERY -PREHOOK: Input: default@c/b/o_t1 -PREHOOK: Input: default@c/b/o_t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: select * from (select count(c_int) over(partition by c_float order by key), sum(c_float) over(partition by c_float order by key), max(c_int) over(partition by c_float order by key), min(c_int) over(partition by c_float order by key), row_number() over(partition by c_float order by key) as rn, rank() over(partition by c_float order by key), dense_rank() over(partition by c_float order by key), round(percent_rank() over(partition by c_float order by key),2), lead(c_int, 2, c_int) over(partition by c_float order by key ), lag(c_float, 2, c_float) over(partition by c_float order by key) from `c/b/o_t1` order by rn) `c/b/o_t1` -POSTHOOK: type: QUERY -POSTHOOK: Input: default@c/b/o_t1 -POSTHOOK: Input: default@c/b/o_t1@dt=2014 -#### A masked pattern was here #### -0 NULL NULL NULL 1 1 1 0.0 NULL NULL -0 NULL NULL NULL 2 1 1 0.0 NULL NULL -16 16.0 1 1 10 5 3 0.24 1 1.0 -16 16.0 1 1 11 5 3 0.24 1 1.0 -16 16.0 1 1 12 5 3 0.24 1 1.0 -16 16.0 1 1 13 5 3 0.24 1 1.0 -16 16.0 1 1 14 5 3 0.24 1 1.0 -16 16.0 1 1 15 5 3 0.24 1 1.0 -16 16.0 1 1 16 5 3 0.24 1 1.0 -16 16.0 1 1 5 5 3 0.24 1 1.0 -16 16.0 1 1 6 5 3 0.24 1 1.0 -16 16.0 1 1 7 5 3 0.24 1 1.0 -16 16.0 1 1 8 5 3 0.24 1 1.0 -16 16.0 1 1 9 5 3 0.24 1 1.0 -18 18.0 1 1 17 17 4 0.94 1 1.0 -18 18.0 1 1 18 17 4 0.94 1 1.0 -2 2.0 1 1 1 1 1 0.0 1 1.0 -2 2.0 1 1 2 1 1 0.0 1 1.0 -4 4.0 1 1 3 3 2 0.12 1 1.0 -4 4.0 1 1 4 3 2 0.12 1 1.0 -PREHOOK: query: select x from (select count(c_int) over() as x, sum(c_float) over() from `c/b/o_t1`) `c/b/o_t1` -PREHOOK: type: QUERY -PREHOOK: Input: default@c/b/o_t1 -PREHOOK: Input: default@c/b/o_t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: select x from (select count(c_int) over() as x, sum(c_float) over() from `c/b/o_t1`) `c/b/o_t1` -POSTHOOK: type: QUERY -POSTHOOK: Input: default@c/b/o_t1 -POSTHOOK: Input: default@c/b/o_t1@dt=2014 -#### A masked pattern was here #### -18 -18 -18 -18 -18 -18 -18 -18 -18 -18 -18 -18 -18 -18 -18 -18 -18 -18 -18 -18 -PREHOOK: query: select 1+sum(c_int) over() from `c/b/o_t1` -PREHOOK: type: QUERY -PREHOOK: Input: default@c/b/o_t1 -PREHOOK: Input: default@c/b/o_t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: select 1+sum(c_int) over() from `c/b/o_t1` -POSTHOOK: type: QUERY -POSTHOOK: Input: default@c/b/o_t1 -POSTHOOK: Input: default@c/b/o_t1@dt=2014 -#### A masked pattern was here #### -19 -19 -19 -19 -19 -19 -19 -19 -19 -19 -19 -19 -19 -19 -19 -19 -19 -19 -19 -19 -PREHOOK: query: select sum(c_int)+sum(sum(c_int)) over() from `c/b/o_t1` -PREHOOK: type: QUERY 
-PREHOOK: Input: default@c/b/o_t1 -PREHOOK: Input: default@c/b/o_t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: select sum(c_int)+sum(sum(c_int)) over() from `c/b/o_t1` -POSTHOOK: type: QUERY -POSTHOOK: Input: default@c/b/o_t1 -POSTHOOK: Input: default@c/b/o_t1@dt=2014 -#### A masked pattern was here #### -36 -PREHOOK: query: select * from (select max(c_int) over (partition by key order by value Rows UNBOUNDED PRECEDING), min(c_int) over (partition by key order by value rows current row), count(c_int) over(partition by key order by value ROWS 1 PRECEDING), avg(value) over (partition by key order by value Rows between unbounded preceding and unbounded following), sum(value) over (partition by key order by value rows between unbounded preceding and current row), avg(c_float) over (partition by key order by value Rows between 1 preceding and unbounded following), sum(c_float) over (partition by key order by value rows between 1 preceding and current row), max(c_float) over (partition by key order by value rows between 1 preceding and unbounded following), min(c_float) over (partition by key order by value rows between 1 preceding and 1 following) from `c/b/o_t1`) `c/b/o_t1` -PREHOOK: type: QUERY -PREHOOK: Input: default@c/b/o_t1 -PREHOOK: Input: default@c/b/o_t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: select * from (select max(c_int) over (partition by key order by value Rows UNBOUNDED PRECEDING), min(c_int) over (partition by key order by value rows current row), count(c_int) over(partition by key order by value ROWS 1 PRECEDING), avg(value) over (partition by key order by value Rows between unbounded preceding and unbounded following), sum(value) over (partition by key order by value rows between unbounded preceding and current row), avg(c_float) over (partition by key order by value Rows between 1 preceding and unbounded following), sum(c_float) over (partition by key order by value rows between 1 preceding and current row), max(c_float) over (partition by key order by value rows between 1 preceding and unbounded following), min(c_float) over (partition by key order by value rows between 1 preceding and 1 following) from `c/b/o_t1`) `c/b/o_t1` -POSTHOOK: type: QUERY -POSTHOOK: Input: default@c/b/o_t1 -POSTHOOK: Input: default@c/b/o_t1@dt=2014 -#### A masked pattern was here #### -1 1 1 1.0 1.0 1.0 1.0 1.0 1.0 -1 1 1 1.0 1.0 1.0 1.0 1.0 1.0 -1 1 1 1.0 1.0 1.0 1.0 1.0 1.0 -1 1 1 1.0 1.0 1.0 1.0 1.0 1.0 -1 1 2 1.0 10.0 1.0 2.0 1.0 1.0 -1 1 2 1.0 11.0 1.0 2.0 1.0 1.0 -1 1 2 1.0 12.0 1.0 2.0 1.0 1.0 -1 1 2 1.0 2.0 1.0 2.0 1.0 1.0 -1 1 2 1.0 2.0 1.0 2.0 1.0 1.0 -1 1 2 1.0 2.0 1.0 2.0 1.0 1.0 -1 1 2 1.0 2.0 1.0 2.0 1.0 1.0 -1 1 2 1.0 3.0 1.0 2.0 1.0 1.0 -1 1 2 1.0 4.0 1.0 2.0 1.0 1.0 -1 1 2 1.0 5.0 1.0 2.0 1.0 1.0 -1 1 2 1.0 6.0 1.0 2.0 1.0 1.0 -1 1 2 1.0 7.0 1.0 2.0 1.0 1.0 -1 1 2 1.0 8.0 1.0 2.0 1.0 1.0 -1 1 2 1.0 9.0 1.0 2.0 1.0 1.0 -NULL NULL 0 NULL NULL NULL NULL NULL NULL -NULL NULL 0 NULL NULL NULL NULL NULL NULL -PREHOOK: query: select i, a, h, b, c, d, e, f, g, a as x, a +1 as y from (select max(c_int) over (partition by key order by value range UNBOUNDED PRECEDING) a, min(c_int) over (partition by key order by value range current row) b, count(c_int) over(partition by key order by value range 1 PRECEDING) c, avg(value) over (partition by key order by value range between unbounded preceding and unbounded following) d, sum(value) over (partition by key order by value range between unbounded preceding and current row) e, avg(c_float) over 
(partition by key order by value range between 1 preceding and unbounded following) f, sum(c_float) over (partition by key order by value range between 1 preceding and current row) g, max(c_float) over (partition by key order by value range between 1 preceding and unbounded following) h, min(c_float) over (partition by key order by value range between 1 preceding and 1 following) i from `c/b/o_t1`) `c/b/o_t1` -PREHOOK: type: QUERY -PREHOOK: Input: default@c/b/o_t1 -PREHOOK: Input: default@c/b/o_t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: select i, a, h, b, c, d, e, f, g, a as x, a +1 as y from (select max(c_int) over (partition by key order by value range UNBOUNDED PRECEDING) a, min(c_int) over (partition by key order by value range current row) b, count(c_int) over(partition by key order by value range 1 PRECEDING) c, avg(value) over (partition by key order by value range between unbounded preceding and unbounded following) d, sum(value) over (partition by key order by value range between unbounded preceding and current row) e, avg(c_float) over (partition by key order by value range between 1 preceding and unbounded following) f, sum(c_float) over (partition by key order by value range between 1 preceding and current row) g, max(c_float) over (partition by key order by value range between 1 preceding and unbounded following) h, min(c_float) over (partition by key order by value range between 1 preceding and 1 following) i from `c/b/o_t1`) `c/b/o_t1` -POSTHOOK: type: QUERY -POSTHOOK: Input: default@c/b/o_t1 -POSTHOOK: Input: default@c/b/o_t1@dt=2014 -#### A masked pattern was here #### -1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2 -1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2 -1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2 -1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2 -1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2 -1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2 -1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2 -1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2 -1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2 -1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2 -1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2 -1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2 -1.0 1 1.0 1 2 1.0 2.0 1.0 2.0 1 2 -1.0 1 1.0 1 2 1.0 2.0 1.0 2.0 1 2 -1.0 1 1.0 1 2 1.0 2.0 1.0 2.0 1 2 -1.0 1 1.0 1 2 1.0 2.0 1.0 2.0 1 2 -1.0 1 1.0 1 2 1.0 2.0 1.0 2.0 1 2 -1.0 1 1.0 1 2 1.0 2.0 1.0 2.0 1 2 -NULL NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL -NULL NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL -PREHOOK: query: select *, rank() over(partition by key order by value) as rr from src1 -PREHOOK: type: QUERY -PREHOOK: Input: default@src1 -#### A masked pattern was here #### -POSTHOOK: query: select *, rank() over(partition by key order by value) as rr from src1 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src1 -#### A masked pattern was here #### - 1 - 1 - 1 - 1 - val_165 5 - val_193 6 - val_265 7 - val_27 8 - val_409 9 - val_484 10 -128 1 -146 val_146 1 -150 val_150 1 -213 val_213 1 -224 1 -238 val_238 1 -255 val_255 1 -273 val_273 1 -278 val_278 1 -311 val_311 1 -369 1 -401 val_401 1 -406 val_406 1 -66 val_66 1 -98 val_98 1 -PREHOOK: query: select *, rank() over(partition by key order by value) from src1 -PREHOOK: type: QUERY -PREHOOK: Input: default@src1 -#### A masked pattern was here #### -POSTHOOK: query: select *, rank() over(partition by key order by value) from src1 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src1 -#### A masked pattern was here #### - 1 - 1 - 1 - 1 - val_165 5 - val_193 6 - val_265 7 - val_27 8 - val_409 9 - val_484 10 -128 1 -146 val_146 1 -150 val_150 1 -213 val_213 1 -224 1 -238 val_238 
1 -255 val_255 1 -273 val_273 1 -278 val_278 1 -311 val_311 1 -369 1 -401 val_401 1 -406 val_406 1 -66 val_66 1 -98 val_98 1 -PREHOOK: query: insert into table `src/_/cbo` select * from src -PREHOOK: type: QUERY -PREHOOK: Input: default@src -PREHOOK: Output: default@src/_/cbo -POSTHOOK: query: insert into table `src/_/cbo` select * from src -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src -POSTHOOK: Output: default@src/_/cbo -POSTHOOK: Lineage: src/_/cbo.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: src/_/cbo.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: select * from `src/_/cbo` limit 1 -PREHOOK: type: QUERY -PREHOOK: Input: default@src/_/cbo -#### A masked pattern was here #### -POSTHOOK: query: select * from `src/_/cbo` limit 1 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src/_/cbo -#### A masked pattern was here #### -238 val_238 -PREHOOK: query: insert overwrite table `src/_/cbo` select * from src -PREHOOK: type: QUERY -PREHOOK: Input: default@src -PREHOOK: Output: default@src/_/cbo -POSTHOOK: query: insert overwrite table `src/_/cbo` select * from src -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src -POSTHOOK: Output: default@src/_/cbo -POSTHOOK: Lineage: src/_/cbo.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: src/_/cbo.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: select * from `src/_/cbo` limit 1 -PREHOOK: type: QUERY -PREHOOK: Input: default@src/_/cbo -#### A masked pattern was here #### -POSTHOOK: query: select * from `src/_/cbo` limit 1 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src/_/cbo -#### A masked pattern was here #### -238 val_238 -PREHOOK: query: drop table `t//` -PREHOOK: type: DROPTABLE -POSTHOOK: query: drop table `t//` -POSTHOOK: type: DROPTABLE -PREHOOK: query: create table `t//` (col string) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@t// -POSTHOOK: query: create table `t//` (col string) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@t// -PREHOOK: query: insert into `t//` values(1) -PREHOOK: type: QUERY -PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@t// -POSTHOOK: query: insert into `t//` values(1) -POSTHOOK: type: QUERY -POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@t// -POSTHOOK: Lineage: t//.col SCRIPT [] -PREHOOK: query: insert into `t//` values(null) -PREHOOK: type: QUERY -PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@t// -POSTHOOK: query: insert into `t//` values(null) -POSTHOOK: type: QUERY -POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@t// -POSTHOOK: Lineage: t//.col SCRIPT [] -PREHOOK: query: analyze table `t//` compute statistics -PREHOOK: type: QUERY -PREHOOK: Input: default@t// -PREHOOK: Output: default@t// -POSTHOOK: query: analyze table `t//` compute statistics -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t// -POSTHOOK: Output: default@t// -PREHOOK: query: explain select * from `t//` -PREHOOK: type: QUERY -POSTHOOK: query: explain select * from `t//` -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-0 is a root stage - -STAGE PLANS: - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - TableScan - alias: t// - Select Operator - expressions: col (type: string) - outputColumnNames: _col0 - ListSink - diff --git 
ql/src/test/results/clientpositive/llap/tez_smb_1.q.out ql/src/test/results/clientpositive/llap/tez_smb_1.q.out deleted file mode 100644 index c5b765a053..0000000000 --- ql/src/test/results/clientpositive/llap/tez_smb_1.q.out +++ /dev/null @@ -1,906 +0,0 @@ -PREHOOK: query: CREATE TABLE srcbucket_mapjoin(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@srcbucket_mapjoin -POSTHOOK: query: CREATE TABLE srcbucket_mapjoin(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@srcbucket_mapjoin -PREHOOK: query: CREATE TABLE tab_part (key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@tab_part -POSTHOOK: query: CREATE TABLE tab_part (key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@tab_part -PREHOOK: query: CREATE TABLE srcbucket_mapjoin_part (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@srcbucket_mapjoin_part -POSTHOOK: query: CREATE TABLE srcbucket_mapjoin_part (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@srcbucket_mapjoin_part -PREHOOK: query: load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08') -PREHOOK: type: LOAD -#### A masked pattern was here #### -PREHOOK: Output: default@srcbucket_mapjoin -POSTHOOK: query: load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08') -POSTHOOK: type: LOAD -#### A masked pattern was here #### -POSTHOOK: Output: default@srcbucket_mapjoin -POSTHOOK: Output: default@srcbucket_mapjoin@ds=2008-04-08 -PREHOOK: query: load data local inpath '../../data/files/bmj1/000001_0' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08') -PREHOOK: type: LOAD -#### A masked pattern was here #### -PREHOOK: Output: default@srcbucket_mapjoin@ds=2008-04-08 -POSTHOOK: query: load data local inpath '../../data/files/bmj1/000001_0' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08') -POSTHOOK: type: LOAD -#### A masked pattern was here #### -POSTHOOK: Output: default@srcbucket_mapjoin@ds=2008-04-08 -PREHOOK: query: load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08') -PREHOOK: type: LOAD -#### A masked pattern was here #### -PREHOOK: Output: default@srcbucket_mapjoin_part -POSTHOOK: query: load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08') -POSTHOOK: type: LOAD -#### A masked pattern was here #### -POSTHOOK: Output: default@srcbucket_mapjoin_part -POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08 -PREHOOK: query: load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08') -PREHOOK: type: LOAD 
-#### A masked pattern was here #### -PREHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08 -POSTHOOK: query: load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08') -POSTHOOK: type: LOAD -#### A masked pattern was here #### -POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08 -PREHOOK: query: load data local inpath '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08') -PREHOOK: type: LOAD -#### A masked pattern was here #### -PREHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08 -POSTHOOK: query: load data local inpath '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08') -POSTHOOK: type: LOAD -#### A masked pattern was here #### -POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08 -PREHOOK: query: load data local inpath '../../data/files/bmj/000003_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08') -PREHOOK: type: LOAD -#### A masked pattern was here #### -PREHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08 -POSTHOOK: query: load data local inpath '../../data/files/bmj/000003_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08') -POSTHOOK: type: LOAD -#### A masked pattern was here #### -POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08 -PREHOOK: query: insert overwrite table tab_part partition (ds='2008-04-08') -select key,value from srcbucket_mapjoin_part -PREHOOK: type: QUERY -PREHOOK: Input: default@srcbucket_mapjoin_part -PREHOOK: Input: default@srcbucket_mapjoin_part@ds=2008-04-08 -PREHOOK: Output: default@tab_part@ds=2008-04-08 -POSTHOOK: query: insert overwrite table tab_part partition (ds='2008-04-08') -select key,value from srcbucket_mapjoin_part -POSTHOOK: type: QUERY -POSTHOOK: Input: default@srcbucket_mapjoin_part -POSTHOOK: Input: default@srcbucket_mapjoin_part@ds=2008-04-08 -POSTHOOK: Output: default@tab_part@ds=2008-04-08 -POSTHOOK: Lineage: tab_part PARTITION(ds=2008-04-08).key SIMPLE [(srcbucket_mapjoin_part)srcbucket_mapjoin_part.FieldSchema(name:key, type:int, comment:null), ] -POSTHOOK: Lineage: tab_part PARTITION(ds=2008-04-08).value SIMPLE [(srcbucket_mapjoin_part)srcbucket_mapjoin_part.FieldSchema(name:value, type:string, comment:null), ] -PREHOOK: query: CREATE TABLE tab(key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@tab -POSTHOOK: query: CREATE TABLE tab(key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@tab -PREHOOK: query: insert overwrite table tab partition (ds='2008-04-08') -select key,value from srcbucket_mapjoin -PREHOOK: type: QUERY -PREHOOK: Input: default@srcbucket_mapjoin -PREHOOK: Input: default@srcbucket_mapjoin@ds=2008-04-08 -PREHOOK: Output: default@tab@ds=2008-04-08 -POSTHOOK: query: insert overwrite table tab partition (ds='2008-04-08') -select key,value from srcbucket_mapjoin -POSTHOOK: type: QUERY -POSTHOOK: Input: default@srcbucket_mapjoin -POSTHOOK: Input: default@srcbucket_mapjoin@ds=2008-04-08 -POSTHOOK: Output: default@tab@ds=2008-04-08 -POSTHOOK: Lineage: tab PARTITION(ds=2008-04-08).key SIMPLE [(srcbucket_mapjoin)srcbucket_mapjoin.FieldSchema(name:key, type:int, comment:null), ] -POSTHOOK: Lineage: tab 
PARTITION(ds=2008-04-08).value SIMPLE [(srcbucket_mapjoin)srcbucket_mapjoin.FieldSchema(name:value, type:string, comment:null), ] -PREHOOK: query: explain -select count(*) from tab s1 join tab s3 on s1.key=s3.key -PREHOOK: type: QUERY -POSTHOOK: query: explain -select count(*) from tab s1 join tab s3 on s1.key=s3.key -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-1 - Tez -#### A masked pattern was here #### - Edges: - Reducer 2 <- Map 1 (CUSTOM_SIMPLE_EDGE) -#### A masked pattern was here #### - Vertices: - Map 1 - Map Operator Tree: - TableScan - alias: s3 - Statistics: Num rows: 242 Data size: 968 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator - predicate: key is not null (type: boolean) - Statistics: Num rows: 242 Data size: 968 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: key (type: int) - outputColumnNames: _col0 - Statistics: Num rows: 242 Data size: 968 Basic stats: COMPLETE Column stats: COMPLETE - Map Operator Tree: - TableScan - alias: s1 - Statistics: Num rows: 242 Data size: 968 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator - predicate: key is not null (type: boolean) - Statistics: Num rows: 242 Data size: 968 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: key (type: int) - outputColumnNames: _col0 - Statistics: Num rows: 242 Data size: 968 Basic stats: COMPLETE Column stats: COMPLETE - Merge Join Operator - condition map: - Inner Join 0 to 1 - keys: - 0 _col0 (type: int) - 1 _col0 (type: int) - Statistics: Num rows: 382 Data size: 3056 Basic stats: COMPLETE Column stats: COMPLETE - Group By Operator - aggregations: count() - mode: hash - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - sort order: - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col0 (type: bigint) - Execution mode: llap - Reducer 2 - Execution mode: vectorized, llap - Reduce Operator Tree: - Group By Operator - aggregations: count(VALUE._col0) - mode: mergepartial - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - ListSink - -PREHOOK: query: explain -select count(*) from -tab vt1 -join -(select rt2.id from -(select t2.key as id, t2.value as od from tab_part t2 order by id, od) rt2) vt2 -where vt1.key=vt2.id -PREHOOK: type: QUERY -POSTHOOK: query: explain -select count(*) from -tab vt1 -join -(select rt2.id from -(select t2.key as id, t2.value as od from tab_part t2 order by id, od) rt2) vt2 -where vt1.key=vt2.id -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-1 - Tez -#### A masked pattern was here #### - Edges: - Reducer 2 <- Map 1 (SIMPLE_EDGE), Reducer 5 (SIMPLE_EDGE) - Reducer 3 <- Reducer 2 (CUSTOM_SIMPLE_EDGE) - Reducer 5 <- Map 4 (SIMPLE_EDGE) -#### A masked pattern was here #### - Vertices: - Map 1 - Map Operator Tree: - TableScan - 
alias: vt1 - Statistics: Num rows: 242 Data size: 968 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator - predicate: key is not null (type: boolean) - Statistics: Num rows: 242 Data size: 968 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: key (type: int) - outputColumnNames: _col0 - Statistics: Num rows: 242 Data size: 968 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 (type: int) - sort order: + - Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 242 Data size: 968 Basic stats: COMPLETE Column stats: COMPLETE - Execution mode: vectorized, llap - LLAP IO: no inputs - Map 4 - Map Operator Tree: - TableScan - alias: t2 - Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator - predicate: key is not null (type: boolean) - Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: key (type: int), value (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 (type: int), _col1 (type: string) - sort order: ++ - Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE - Execution mode: vectorized, llap - LLAP IO: no inputs - Reducer 2 - Execution mode: llap - Reduce Operator Tree: - Merge Join Operator - condition map: - Inner Join 0 to 1 - keys: - 0 _col0 (type: int) - 1 _col0 (type: int) - Statistics: Num rows: 391 Data size: 3128 Basic stats: COMPLETE Column stats: COMPLETE - Group By Operator - aggregations: count() - mode: hash - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - sort order: - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col0 (type: bigint) - Reducer 3 - Execution mode: vectorized, llap - Reduce Operator Tree: - Group By Operator - aggregations: count(VALUE._col0) - mode: mergepartial - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - Reducer 5 - Execution mode: vectorized, llap - Reduce Operator Tree: - Select Operator - expressions: KEY.reducesinkkey0 (type: int) - outputColumnNames: _col0 - Statistics: Num rows: 500 Data size: 2000 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 (type: int) - sort order: + - Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 500 Data size: 2000 Basic stats: COMPLETE Column stats: COMPLETE - - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - ListSink - -PREHOOK: query: select count(*) from -tab vt1 -join -(select rt2.id from -(select t2.key as id, t2.value as od from tab_part t2 order by id, od) rt2) vt2 -where vt1.key=vt2.id -PREHOOK: type: QUERY -PREHOOK: Input: default@tab -PREHOOK: Input: default@tab@ds=2008-04-08 -PREHOOK: Input: default@tab_part -PREHOOK: Input: default@tab_part@ds=2008-04-08 -#### A masked pattern was here #### 
-POSTHOOK: query: select count(*) from -tab vt1 -join -(select rt2.id from -(select t2.key as id, t2.value as od from tab_part t2 order by id, od) rt2) vt2 -where vt1.key=vt2.id -POSTHOOK: type: QUERY -POSTHOOK: Input: default@tab -POSTHOOK: Input: default@tab@ds=2008-04-08 -POSTHOOK: Input: default@tab_part -POSTHOOK: Input: default@tab_part@ds=2008-04-08 -#### A masked pattern was here #### -480 -PREHOOK: query: explain -select count(*) from -(select rt2.id from -(select t2.key as id, t2.value as od from tab_part t2 order by id, od) rt2) vt2 -join -tab vt1 -where vt1.key=vt2.id -PREHOOK: type: QUERY -POSTHOOK: query: explain -select count(*) from -(select rt2.id from -(select t2.key as id, t2.value as od from tab_part t2 order by id, od) rt2) vt2 -join -tab vt1 -where vt1.key=vt2.id -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-1 - Tez -#### A masked pattern was here #### - Edges: - Reducer 2 <- Map 1 (SIMPLE_EDGE) - Reducer 3 <- Map 5 (SIMPLE_EDGE), Reducer 2 (SIMPLE_EDGE) - Reducer 4 <- Reducer 3 (CUSTOM_SIMPLE_EDGE) -#### A masked pattern was here #### - Vertices: - Map 1 - Map Operator Tree: - TableScan - alias: t2 - Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator - predicate: key is not null (type: boolean) - Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: key (type: int), value (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 (type: int), _col1 (type: string) - sort order: ++ - Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE - Execution mode: vectorized, llap - LLAP IO: no inputs - Map 5 - Map Operator Tree: - TableScan - alias: vt1 - Statistics: Num rows: 242 Data size: 968 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator - predicate: key is not null (type: boolean) - Statistics: Num rows: 242 Data size: 968 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: key (type: int) - outputColumnNames: _col0 - Statistics: Num rows: 242 Data size: 968 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 (type: int) - sort order: + - Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 242 Data size: 968 Basic stats: COMPLETE Column stats: COMPLETE - Execution mode: vectorized, llap - LLAP IO: no inputs - Reducer 2 - Execution mode: vectorized, llap - Reduce Operator Tree: - Select Operator - expressions: KEY.reducesinkkey0 (type: int) - outputColumnNames: _col0 - Statistics: Num rows: 500 Data size: 2000 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 (type: int) - sort order: + - Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 500 Data size: 2000 Basic stats: COMPLETE Column stats: COMPLETE - Reducer 3 - Execution mode: llap - Reduce Operator Tree: - Merge Join Operator - condition map: - Inner Join 0 to 1 - keys: - 0 _col0 (type: int) - 1 _col0 (type: int) - Statistics: Num rows: 391 Data size: 3128 Basic stats: COMPLETE Column stats: COMPLETE - Group By Operator - aggregations: count() - mode: hash - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - Reduce 
Output Operator - sort order: - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col0 (type: bigint) - Reducer 4 - Execution mode: vectorized, llap - Reduce Operator Tree: - Group By Operator - aggregations: count(VALUE._col0) - mode: mergepartial - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - ListSink - -PREHOOK: query: select count(*) from -(select rt2.id from -(select t2.key as id, t2.value as od from tab_part t2 order by id, od) rt2) vt2 -join -tab vt1 -where vt1.key=vt2.id -PREHOOK: type: QUERY -PREHOOK: Input: default@tab -PREHOOK: Input: default@tab@ds=2008-04-08 -PREHOOK: Input: default@tab_part -PREHOOK: Input: default@tab_part@ds=2008-04-08 -#### A masked pattern was here #### -POSTHOOK: query: select count(*) from -(select rt2.id from -(select t2.key as id, t2.value as od from tab_part t2 order by id, od) rt2) vt2 -join -tab vt1 -where vt1.key=vt2.id -POSTHOOK: type: QUERY -POSTHOOK: Input: default@tab -POSTHOOK: Input: default@tab@ds=2008-04-08 -POSTHOOK: Input: default@tab_part -POSTHOOK: Input: default@tab_part@ds=2008-04-08 -#### A masked pattern was here #### -480 -PREHOOK: query: explain -select count(*) from -(select rt1.id from -(select t1.key as id, t1.value as od from tab t1 order by id, od) rt1) vt1 -join -(select rt2.id from -(select t2.key as id, t2.value as od from tab_part t2 order by id, od) rt2) vt2 -where vt1.id=vt2.id -PREHOOK: type: QUERY -POSTHOOK: query: explain -select count(*) from -(select rt1.id from -(select t1.key as id, t1.value as od from tab t1 order by id, od) rt1) vt1 -join -(select rt2.id from -(select t2.key as id, t2.value as od from tab_part t2 order by id, od) rt2) vt2 -where vt1.id=vt2.id -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-1 - Tez -#### A masked pattern was here #### - Edges: - Reducer 4 <- Map 1 (SIMPLE_EDGE), Map 3 (SIMPLE_EDGE) - Reducer 5 <- Reducer 4 (CUSTOM_SIMPLE_EDGE) -#### A masked pattern was here #### - Vertices: - Map 1 - Map Operator Tree: - TableScan - alias: t1 - Statistics: Num rows: 242 Data size: 22990 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator - predicate: key is not null (type: boolean) - Statistics: Num rows: 242 Data size: 22990 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: key (type: int), value (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 242 Data size: 22990 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 (type: int), _col1 (type: string) - sort order: ++ - Statistics: Num rows: 242 Data size: 22990 Basic stats: COMPLETE Column stats: COMPLETE - Execution mode: vectorized, llap - LLAP IO: no inputs - Map 3 - Map Operator Tree: - TableScan - alias: t2 - Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator - predicate: key is not null (type: boolean) - Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column 
stats: COMPLETE - Select Operator - expressions: key (type: int), value (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 (type: int), _col1 (type: string) - sort order: ++ - Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE - Execution mode: vectorized, llap - LLAP IO: no inputs - Reducer 4 - Reduce Operator Tree: - Select Operator - expressions: KEY.reducesinkkey0 (type: int) - outputColumnNames: _col0 - Statistics: Num rows: 242 Data size: 968 Basic stats: COMPLETE Column stats: COMPLETE - Execution mode: llap - Reduce Operator Tree: - Select Operator - expressions: KEY.reducesinkkey0 (type: int) - outputColumnNames: _col0 - Statistics: Num rows: 500 Data size: 2000 Basic stats: COMPLETE Column stats: COMPLETE - Merge Join Operator - condition map: - Inner Join 0 to 1 - keys: - 0 _col0 (type: int) - 1 _col0 (type: int) - Statistics: Num rows: 391 Data size: 3128 Basic stats: COMPLETE Column stats: COMPLETE - Group By Operator - aggregations: count() - mode: hash - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - sort order: - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col0 (type: bigint) - Reducer 5 - Execution mode: vectorized, llap - Reduce Operator Tree: - Group By Operator - aggregations: count(VALUE._col0) - mode: mergepartial - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - ListSink - -PREHOOK: query: select count(*) from -(select rt1.id from -(select t1.key as id, t1.value as od from tab t1 order by id, od) rt1) vt1 -join -(select rt2.id from -(select t2.key as id, t2.value as od from tab_part t2 order by id, od) rt2) vt2 -where vt1.id=vt2.id -PREHOOK: type: QUERY -PREHOOK: Input: default@tab -PREHOOK: Input: default@tab@ds=2008-04-08 -PREHOOK: Input: default@tab_part -PREHOOK: Input: default@tab_part@ds=2008-04-08 -#### A masked pattern was here #### -POSTHOOK: query: select count(*) from -(select rt1.id from -(select t1.key as id, t1.value as od from tab t1 order by id, od) rt1) vt1 -join -(select rt2.id from -(select t2.key as id, t2.value as od from tab_part t2 order by id, od) rt2) vt2 -where vt1.id=vt2.id -POSTHOOK: type: QUERY -POSTHOOK: Input: default@tab -POSTHOOK: Input: default@tab@ds=2008-04-08 -POSTHOOK: Input: default@tab_part -POSTHOOK: Input: default@tab_part@ds=2008-04-08 -#### A masked pattern was here #### -480 -PREHOOK: query: explain -select count(*) from -(select rt1.id from -(select t1.key as id, t1.value as od from tab t1 order by id, od) rt1) vt1 -join -(select rt2.id from -(select t2.key as id, t2.value as od from tab_part t2 order by id, od) rt2) vt2 -where vt1.id=vt2.id -PREHOOK: type: QUERY -POSTHOOK: query: explain -select count(*) from -(select rt1.id from -(select t1.key as id, t1.value as od from tab t1 order by id, od) rt1) vt1 -join -(select rt2.id from -(select t2.key 
as id, t2.value as od from tab_part t2 order by id, od) rt2) vt2 -where vt1.id=vt2.id -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-1 - Tez -#### A masked pattern was here #### - Edges: - Reducer 2 <- Map 1 (SIMPLE_EDGE) - Reducer 3 <- Reducer 2 (SIMPLE_EDGE), Reducer 6 (SIMPLE_EDGE) - Reducer 4 <- Reducer 3 (CUSTOM_SIMPLE_EDGE) - Reducer 6 <- Map 5 (SIMPLE_EDGE) -#### A masked pattern was here #### - Vertices: - Map 1 - Map Operator Tree: - TableScan - alias: t1 - Statistics: Num rows: 242 Data size: 22990 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator - predicate: key is not null (type: boolean) - Statistics: Num rows: 242 Data size: 22990 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: key (type: int), value (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 242 Data size: 22990 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 (type: int), _col1 (type: string) - sort order: ++ - Statistics: Num rows: 242 Data size: 22990 Basic stats: COMPLETE Column stats: COMPLETE - Execution mode: vectorized, llap - LLAP IO: no inputs - Map 5 - Map Operator Tree: - TableScan - alias: t2 - Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator - predicate: key is not null (type: boolean) - Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: key (type: int), value (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 (type: int), _col1 (type: string) - sort order: ++ - Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE - Execution mode: vectorized, llap - LLAP IO: no inputs - Reducer 2 - Execution mode: vectorized, llap - Reduce Operator Tree: - Select Operator - expressions: KEY.reducesinkkey0 (type: int) - outputColumnNames: _col0 - Statistics: Num rows: 242 Data size: 968 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 (type: int) - sort order: + - Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 242 Data size: 968 Basic stats: COMPLETE Column stats: COMPLETE - Reducer 3 - Execution mode: llap - Reduce Operator Tree: - Merge Join Operator - condition map: - Inner Join 0 to 1 - keys: - 0 _col0 (type: int) - 1 _col0 (type: int) - Statistics: Num rows: 391 Data size: 3128 Basic stats: COMPLETE Column stats: COMPLETE - Group By Operator - aggregations: count() - mode: hash - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - sort order: - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col0 (type: bigint) - Reducer 4 - Execution mode: vectorized, llap - Reduce Operator Tree: - Group By Operator - aggregations: count(VALUE._col0) - mode: mergepartial - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - Reducer 6 - Execution mode: vectorized, llap - Reduce Operator Tree: - Select Operator - expressions: KEY.reducesinkkey0 (type: int) - outputColumnNames: _col0 - Statistics: Num rows: 500 Data size: 2000 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 (type: int) - sort order: + - Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 500 Data size: 2000 Basic stats: COMPLETE Column stats: COMPLETE - - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - ListSink - -PREHOOK: query: select count(*) from -(select rt1.id from -(select t1.key as id, t1.value as od from tab t1 order by id, od) rt1) vt1 -join -(select rt2.id from -(select t2.key as id, t2.value as od from tab_part t2 order by id, od) rt2) vt2 -where vt1.id=vt2.id -PREHOOK: type: QUERY -PREHOOK: Input: default@tab -PREHOOK: Input: default@tab@ds=2008-04-08 -PREHOOK: Input: default@tab_part -PREHOOK: Input: default@tab_part@ds=2008-04-08 -#### A masked pattern was here #### -POSTHOOK: query: select count(*) from -(select rt1.id from -(select t1.key as id, t1.value as od from tab t1 order by id, od) rt1) vt1 -join -(select rt2.id from -(select t2.key as id, t2.value as od from tab_part t2 order by id, od) rt2) vt2 -where vt1.id=vt2.id -POSTHOOK: type: QUERY -POSTHOOK: Input: default@tab -POSTHOOK: Input: default@tab@ds=2008-04-08 -POSTHOOK: Input: default@tab_part -POSTHOOK: Input: default@tab_part@ds=2008-04-08 -#### A masked pattern was here #### -480 -PREHOOK: query: CREATE EXTERNAL TABLE tab_ext(key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@tab_ext -POSTHOOK: query: CREATE EXTERNAL TABLE tab_ext(key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@tab_ext -PREHOOK: query: insert overwrite table tab_ext partition (ds='2008-04-08') -select key,value from srcbucket_mapjoin -PREHOOK: type: QUERY -PREHOOK: Input: default@srcbucket_mapjoin -PREHOOK: Input: default@srcbucket_mapjoin@ds=2008-04-08 -PREHOOK: Output: default@tab_ext@ds=2008-04-08 -POSTHOOK: query: insert overwrite table tab_ext partition (ds='2008-04-08') -select key,value from srcbucket_mapjoin -POSTHOOK: type: QUERY -POSTHOOK: Input: default@srcbucket_mapjoin -POSTHOOK: Input: default@srcbucket_mapjoin@ds=2008-04-08 -POSTHOOK: Output: default@tab_ext@ds=2008-04-08 -POSTHOOK: Lineage: tab_ext PARTITION(ds=2008-04-08).key SIMPLE [(srcbucket_mapjoin)srcbucket_mapjoin.FieldSchema(name:key, type:int, comment:null), ] -POSTHOOK: Lineage: tab_ext PARTITION(ds=2008-04-08).value SIMPLE [(srcbucket_mapjoin)srcbucket_mapjoin.FieldSchema(name:value, type:string, comment:null), ] -test.comment=SMB disabled for external tables -PREHOOK: query: explain -select count(*) from tab_ext s1 join tab_ext s3 on s1.key=s3.key -PREHOOK: type: QUERY -POSTHOOK: query: explain -select count(*) from tab_ext s1 join tab_ext s3 on s1.key=s3.key -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-1 - Tez -#### A masked pattern was here #### - Edges: - Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE) - 
Reducer 3 <- Reducer 2 (CUSTOM_SIMPLE_EDGE) -#### A masked pattern was here #### - Vertices: - Map 1 - Map Operator Tree: - TableScan - alias: s1 - Statistics: Num rows: 242 Data size: 3490 Basic stats: COMPLETE Column stats: NONE - Filter Operator - predicate: key is not null (type: boolean) - Statistics: Num rows: 230 Data size: 3316 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: key (type: int) - outputColumnNames: _col0 - Statistics: Num rows: 230 Data size: 3316 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: _col0 (type: int) - sort order: + - Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 230 Data size: 3316 Basic stats: COMPLETE Column stats: NONE - Execution mode: vectorized, llap - LLAP IO: no inputs - Map 4 - Map Operator Tree: - TableScan - alias: s3 - Statistics: Num rows: 242 Data size: 3490 Basic stats: COMPLETE Column stats: NONE - Filter Operator - predicate: key is not null (type: boolean) - Statistics: Num rows: 230 Data size: 3316 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: key (type: int) - outputColumnNames: _col0 - Statistics: Num rows: 230 Data size: 3316 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: _col0 (type: int) - sort order: + - Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 230 Data size: 3316 Basic stats: COMPLETE Column stats: NONE - Execution mode: vectorized, llap - LLAP IO: no inputs - Reducer 2 - Execution mode: llap - Reduce Operator Tree: - Merge Join Operator - condition map: - Inner Join 0 to 1 - keys: - 0 _col0 (type: int) - 1 _col0 (type: int) - Statistics: Num rows: 253 Data size: 3647 Basic stats: COMPLETE Column stats: NONE - Group By Operator - aggregations: count() - mode: hash - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - sort order: - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE - value expressions: _col0 (type: bigint) - Reducer 3 - Execution mode: vectorized, llap - Reduce Operator Tree: - Group By Operator - aggregations: count(VALUE._col0) - mode: mergepartial - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - ListSink - diff --git ql/src/test/results/clientpositive/llap/union_fast_stats.q.out ql/src/test/results/clientpositive/llap/union_fast_stats.q.out deleted file mode 100644 index 40f469be3f..0000000000 --- ql/src/test/results/clientpositive/llap/union_fast_stats.q.out +++ /dev/null @@ -1,664 +0,0 @@ -PREHOOK: query: drop table small_alltypesorc1a_n2 -PREHOOK: type: DROPTABLE -POSTHOOK: query: drop table small_alltypesorc1a_n2 -POSTHOOK: type: DROPTABLE -PREHOOK: query: drop table small_alltypesorc2a_n2 -PREHOOK: type: DROPTABLE -POSTHOOK: query: drop table small_alltypesorc2a_n2 -POSTHOOK: type: DROPTABLE -PREHOOK: query: drop table small_alltypesorc3a_n2 -PREHOOK: type: DROPTABLE -POSTHOOK: query: drop table small_alltypesorc3a_n2 -POSTHOOK: type: DROPTABLE -PREHOOK: query: drop 
table small_alltypesorc4a_n2 -PREHOOK: type: DROPTABLE -POSTHOOK: query: drop table small_alltypesorc4a_n2 -POSTHOOK: type: DROPTABLE -PREHOOK: query: drop table small_alltypesorc_a_n2 -PREHOOK: type: DROPTABLE -POSTHOOK: query: drop table small_alltypesorc_a_n2 -POSTHOOK: type: DROPTABLE -PREHOOK: query: create table small_alltypesorc1a_n2 as select * from alltypesorc where cint is not null and ctinyint is not null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5 -PREHOOK: type: CREATETABLE_AS_SELECT -PREHOOK: Input: default@alltypesorc -PREHOOK: Output: database:default -PREHOOK: Output: default@small_alltypesorc1a_n2 -POSTHOOK: query: create table small_alltypesorc1a_n2 as select * from alltypesorc where cint is not null and ctinyint is not null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5 -POSTHOOK: type: CREATETABLE_AS_SELECT -POSTHOOK: Input: default@alltypesorc -POSTHOOK: Output: database:default -POSTHOOK: Output: default@small_alltypesorc1a_n2 -POSTHOOK: Lineage: small_alltypesorc1a_n2.cbigint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc1a_n2.cboolean1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean1, type:boolean, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc1a_n2.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc1a_n2.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc1a_n2.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc1a_n2.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc1a_n2.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc1a_n2.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc1a_n2.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc1a_n2.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc1a_n2.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc1a_n2.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ] -PREHOOK: query: create table small_alltypesorc2a_n2 as select * from alltypesorc where cint is null and ctinyint is not null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5 -PREHOOK: type: CREATETABLE_AS_SELECT -PREHOOK: Input: default@alltypesorc -PREHOOK: Output: database:default -PREHOOK: Output: default@small_alltypesorc2a_n2 -POSTHOOK: query: create table small_alltypesorc2a_n2 as select * from alltypesorc where cint is null and ctinyint is not null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5 
-POSTHOOK: type: CREATETABLE_AS_SELECT -POSTHOOK: Input: default@alltypesorc -POSTHOOK: Output: database:default -POSTHOOK: Output: default@small_alltypesorc2a_n2 -POSTHOOK: Lineage: small_alltypesorc2a_n2.cbigint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc2a_n2.cboolean1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean1, type:boolean, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc2a_n2.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc2a_n2.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc2a_n2.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc2a_n2.cint SIMPLE [] -POSTHOOK: Lineage: small_alltypesorc2a_n2.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc2a_n2.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc2a_n2.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc2a_n2.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc2a_n2.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc2a_n2.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ] -PREHOOK: query: create table small_alltypesorc3a_n2 as select * from alltypesorc where cint is not null and ctinyint is null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5 -PREHOOK: type: CREATETABLE_AS_SELECT -PREHOOK: Input: default@alltypesorc -PREHOOK: Output: database:default -PREHOOK: Output: default@small_alltypesorc3a_n2 -POSTHOOK: query: create table small_alltypesorc3a_n2 as select * from alltypesorc where cint is not null and ctinyint is null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5 -POSTHOOK: type: CREATETABLE_AS_SELECT -POSTHOOK: Input: default@alltypesorc -POSTHOOK: Output: database:default -POSTHOOK: Output: default@small_alltypesorc3a_n2 -POSTHOOK: Lineage: small_alltypesorc3a_n2.cbigint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc3a_n2.cboolean1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean1, type:boolean, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc3a_n2.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc3a_n2.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc3a_n2.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc3a_n2.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc3a_n2.csmallint SIMPLE 
[(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc3a_n2.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc3a_n2.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc3a_n2.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc3a_n2.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc3a_n2.ctinyint SIMPLE [] -PREHOOK: query: create table small_alltypesorc4a_n2 as select * from alltypesorc where cint is null and ctinyint is null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5 -PREHOOK: type: CREATETABLE_AS_SELECT -PREHOOK: Input: default@alltypesorc -PREHOOK: Output: database:default -PREHOOK: Output: default@small_alltypesorc4a_n2 -POSTHOOK: query: create table small_alltypesorc4a_n2 as select * from alltypesorc where cint is null and ctinyint is null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5 -POSTHOOK: type: CREATETABLE_AS_SELECT -POSTHOOK: Input: default@alltypesorc -POSTHOOK: Output: database:default -POSTHOOK: Output: default@small_alltypesorc4a_n2 -POSTHOOK: Lineage: small_alltypesorc4a_n2.cbigint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc4a_n2.cboolean1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean1, type:boolean, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc4a_n2.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc4a_n2.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc4a_n2.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc4a_n2.cint SIMPLE [] -POSTHOOK: Lineage: small_alltypesorc4a_n2.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc4a_n2.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc4a_n2.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc4a_n2.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc4a_n2.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc4a_n2.ctinyint SIMPLE [] -PREHOOK: query: create table small_alltypesorc_a_n2 stored as orc as select * from -(select * from (select * from small_alltypesorc1a_n2) sq1 - union all - select * from (select * from small_alltypesorc2a_n2) sq2 - union all - select * from (select * from small_alltypesorc3a_n2) sq3 - union all - select * from (select * from small_alltypesorc4a_n2) sq4) q -PREHOOK: type: CREATETABLE_AS_SELECT -PREHOOK: Input: 
default@small_alltypesorc1a_n2 -PREHOOK: Input: default@small_alltypesorc2a_n2 -PREHOOK: Input: default@small_alltypesorc3a_n2 -PREHOOK: Input: default@small_alltypesorc4a_n2 -PREHOOK: Output: database:default -PREHOOK: Output: default@small_alltypesorc_a_n2 -POSTHOOK: query: create table small_alltypesorc_a_n2 stored as orc as select * from -(select * from (select * from small_alltypesorc1a_n2) sq1 - union all - select * from (select * from small_alltypesorc2a_n2) sq2 - union all - select * from (select * from small_alltypesorc3a_n2) sq3 - union all - select * from (select * from small_alltypesorc4a_n2) sq4) q -POSTHOOK: type: CREATETABLE_AS_SELECT -POSTHOOK: Input: default@small_alltypesorc1a_n2 -POSTHOOK: Input: default@small_alltypesorc2a_n2 -POSTHOOK: Input: default@small_alltypesorc3a_n2 -POSTHOOK: Input: default@small_alltypesorc4a_n2 -POSTHOOK: Output: database:default -POSTHOOK: Output: default@small_alltypesorc_a_n2 -POSTHOOK: Lineage: small_alltypesorc_a_n2.cbigint EXPRESSION [(small_alltypesorc1a_n2)small_alltypesorc1a_n2.FieldSchema(name:cbigint, type:bigint, comment:null), (small_alltypesorc2a_n2)small_alltypesorc2a_n2.FieldSchema(name:cbigint, type:bigint, comment:null), (small_alltypesorc3a_n2)small_alltypesorc3a_n2.FieldSchema(name:cbigint, type:bigint, comment:null), (small_alltypesorc4a_n2)small_alltypesorc4a_n2.FieldSchema(name:cbigint, type:bigint, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc_a_n2.cboolean1 EXPRESSION [(small_alltypesorc1a_n2)small_alltypesorc1a_n2.FieldSchema(name:cboolean1, type:boolean, comment:null), (small_alltypesorc2a_n2)small_alltypesorc2a_n2.FieldSchema(name:cboolean1, type:boolean, comment:null), (small_alltypesorc3a_n2)small_alltypesorc3a_n2.FieldSchema(name:cboolean1, type:boolean, comment:null), (small_alltypesorc4a_n2)small_alltypesorc4a_n2.FieldSchema(name:cboolean1, type:boolean, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc_a_n2.cboolean2 EXPRESSION [(small_alltypesorc1a_n2)small_alltypesorc1a_n2.FieldSchema(name:cboolean2, type:boolean, comment:null), (small_alltypesorc2a_n2)small_alltypesorc2a_n2.FieldSchema(name:cboolean2, type:boolean, comment:null), (small_alltypesorc3a_n2)small_alltypesorc3a_n2.FieldSchema(name:cboolean2, type:boolean, comment:null), (small_alltypesorc4a_n2)small_alltypesorc4a_n2.FieldSchema(name:cboolean2, type:boolean, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc_a_n2.cdouble EXPRESSION [(small_alltypesorc1a_n2)small_alltypesorc1a_n2.FieldSchema(name:cdouble, type:double, comment:null), (small_alltypesorc2a_n2)small_alltypesorc2a_n2.FieldSchema(name:cdouble, type:double, comment:null), (small_alltypesorc3a_n2)small_alltypesorc3a_n2.FieldSchema(name:cdouble, type:double, comment:null), (small_alltypesorc4a_n2)small_alltypesorc4a_n2.FieldSchema(name:cdouble, type:double, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc_a_n2.cfloat EXPRESSION [(small_alltypesorc1a_n2)small_alltypesorc1a_n2.FieldSchema(name:cfloat, type:float, comment:null), (small_alltypesorc2a_n2)small_alltypesorc2a_n2.FieldSchema(name:cfloat, type:float, comment:null), (small_alltypesorc3a_n2)small_alltypesorc3a_n2.FieldSchema(name:cfloat, type:float, comment:null), (small_alltypesorc4a_n2)small_alltypesorc4a_n2.FieldSchema(name:cfloat, type:float, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc_a_n2.cint EXPRESSION [(small_alltypesorc1a_n2)small_alltypesorc1a_n2.FieldSchema(name:cint, type:int, comment:null), (small_alltypesorc2a_n2)small_alltypesorc2a_n2.FieldSchema(name:cint, type:int, comment:null), 
(small_alltypesorc3a_n2)small_alltypesorc3a_n2.FieldSchema(name:cint, type:int, comment:null), (small_alltypesorc4a_n2)small_alltypesorc4a_n2.FieldSchema(name:cint, type:int, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc_a_n2.csmallint EXPRESSION [(small_alltypesorc1a_n2)small_alltypesorc1a_n2.FieldSchema(name:csmallint, type:smallint, comment:null), (small_alltypesorc2a_n2)small_alltypesorc2a_n2.FieldSchema(name:csmallint, type:smallint, comment:null), (small_alltypesorc3a_n2)small_alltypesorc3a_n2.FieldSchema(name:csmallint, type:smallint, comment:null), (small_alltypesorc4a_n2)small_alltypesorc4a_n2.FieldSchema(name:csmallint, type:smallint, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc_a_n2.cstring1 EXPRESSION [(small_alltypesorc1a_n2)small_alltypesorc1a_n2.FieldSchema(name:cstring1, type:string, comment:null), (small_alltypesorc2a_n2)small_alltypesorc2a_n2.FieldSchema(name:cstring1, type:string, comment:null), (small_alltypesorc3a_n2)small_alltypesorc3a_n2.FieldSchema(name:cstring1, type:string, comment:null), (small_alltypesorc4a_n2)small_alltypesorc4a_n2.FieldSchema(name:cstring1, type:string, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc_a_n2.cstring2 EXPRESSION [(small_alltypesorc1a_n2)small_alltypesorc1a_n2.FieldSchema(name:cstring2, type:string, comment:null), (small_alltypesorc2a_n2)small_alltypesorc2a_n2.FieldSchema(name:cstring2, type:string, comment:null), (small_alltypesorc3a_n2)small_alltypesorc3a_n2.FieldSchema(name:cstring2, type:string, comment:null), (small_alltypesorc4a_n2)small_alltypesorc4a_n2.FieldSchema(name:cstring2, type:string, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc_a_n2.ctimestamp1 EXPRESSION [(small_alltypesorc1a_n2)small_alltypesorc1a_n2.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), (small_alltypesorc2a_n2)small_alltypesorc2a_n2.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), (small_alltypesorc3a_n2)small_alltypesorc3a_n2.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), (small_alltypesorc4a_n2)small_alltypesorc4a_n2.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc_a_n2.ctimestamp2 EXPRESSION [(small_alltypesorc1a_n2)small_alltypesorc1a_n2.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), (small_alltypesorc2a_n2)small_alltypesorc2a_n2.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), (small_alltypesorc3a_n2)small_alltypesorc3a_n2.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), (small_alltypesorc4a_n2)small_alltypesorc4a_n2.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc_a_n2.ctinyint EXPRESSION [(small_alltypesorc1a_n2)small_alltypesorc1a_n2.FieldSchema(name:ctinyint, type:tinyint, comment:null), (small_alltypesorc2a_n2)small_alltypesorc2a_n2.FieldSchema(name:ctinyint, type:tinyint, comment:null), (small_alltypesorc3a_n2)small_alltypesorc3a_n2.FieldSchema(name:ctinyint, type:tinyint, comment:null), (small_alltypesorc4a_n2)small_alltypesorc4a_n2.FieldSchema(name:ctinyint, type:tinyint, comment:null), ] -PREHOOK: query: desc formatted small_alltypesorc_a_n2 -PREHOOK: type: DESCTABLE -PREHOOK: Input: default@small_alltypesorc_a_n2 -POSTHOOK: query: desc formatted small_alltypesorc_a_n2 -POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@small_alltypesorc_a_n2 -# col_name data_type comment -ctinyint tinyint -csmallint smallint -cint int -cbigint bigint -cfloat float -cdouble double -cstring1 string -cstring2 string -ctimestamp1 timestamp 
-ctimestamp2 timestamp -cboolean1 boolean -cboolean2 boolean - -# Detailed Table Information -Database: default -#### A masked pattern was here #### -Retention: 0 -#### A masked pattern was here #### -Table Type: MANAGED_TABLE -Table Parameters: - COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"} - bucketing_version 2 - numFiles 3 - numRows 15 - rawDataSize 3315 - totalSize 4152 -#### A masked pattern was here #### - -# Storage Information -SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde -InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat -OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat -Compressed: No -Num Buckets: -1 -Bucket Columns: [] -Sort Columns: [] -Storage Desc Params: - serialization.format 1 -PREHOOK: query: ANALYZE TABLE small_alltypesorc_a_n2 COMPUTE STATISTICS -PREHOOK: type: QUERY -PREHOOK: Input: default@small_alltypesorc_a_n2 -PREHOOK: Output: default@small_alltypesorc_a_n2 -POSTHOOK: query: ANALYZE TABLE small_alltypesorc_a_n2 COMPUTE STATISTICS -POSTHOOK: type: QUERY -POSTHOOK: Input: default@small_alltypesorc_a_n2 -POSTHOOK: Output: default@small_alltypesorc_a_n2 -PREHOOK: query: desc formatted small_alltypesorc_a_n2 -PREHOOK: type: DESCTABLE -PREHOOK: Input: default@small_alltypesorc_a_n2 -POSTHOOK: query: desc formatted small_alltypesorc_a_n2 -POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@small_alltypesorc_a_n2 -# col_name data_type comment -ctinyint tinyint -csmallint smallint -cint int -cbigint bigint -cfloat float -cdouble double -cstring1 string -cstring2 string -ctimestamp1 timestamp -ctimestamp2 timestamp -cboolean1 boolean -cboolean2 boolean - -# Detailed Table Information -Database: default -#### A masked pattern was here #### -Retention: 0 -#### A masked pattern was here #### -Table Type: MANAGED_TABLE -Table Parameters: - COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"} - bucketing_version 2 - numFiles 3 - numRows 15 - rawDataSize 3483 - totalSize 4152 -#### A masked pattern was here #### - -# Storage Information -SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde -InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat -OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat -Compressed: No -Num Buckets: -1 -Bucket Columns: [] -Sort Columns: [] -Storage Desc Params: - serialization.format 1 -PREHOOK: query: insert into table small_alltypesorc_a_n2 select * from small_alltypesorc1a_n2 -PREHOOK: type: QUERY -PREHOOK: Input: default@small_alltypesorc1a_n2 -PREHOOK: Output: default@small_alltypesorc_a_n2 -POSTHOOK: query: insert into table small_alltypesorc_a_n2 select * from small_alltypesorc1a_n2 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@small_alltypesorc1a_n2 -POSTHOOK: Output: default@small_alltypesorc_a_n2 -POSTHOOK: Lineage: small_alltypesorc_a_n2.cbigint SIMPLE [(small_alltypesorc1a_n2)small_alltypesorc1a_n2.FieldSchema(name:cbigint, type:bigint, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc_a_n2.cboolean1 SIMPLE [(small_alltypesorc1a_n2)small_alltypesorc1a_n2.FieldSchema(name:cboolean1, type:boolean, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc_a_n2.cboolean2 SIMPLE [(small_alltypesorc1a_n2)small_alltypesorc1a_n2.FieldSchema(name:cboolean2, type:boolean, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc_a_n2.cdouble SIMPLE [(small_alltypesorc1a_n2)small_alltypesorc1a_n2.FieldSchema(name:cdouble, type:double, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc_a_n2.cfloat SIMPLE [(small_alltypesorc1a_n2)small_alltypesorc1a_n2.FieldSchema(name:cfloat, type:float, 
comment:null), ] -POSTHOOK: Lineage: small_alltypesorc_a_n2.cint SIMPLE [(small_alltypesorc1a_n2)small_alltypesorc1a_n2.FieldSchema(name:cint, type:int, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc_a_n2.csmallint SIMPLE [(small_alltypesorc1a_n2)small_alltypesorc1a_n2.FieldSchema(name:csmallint, type:smallint, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc_a_n2.cstring1 SIMPLE [(small_alltypesorc1a_n2)small_alltypesorc1a_n2.FieldSchema(name:cstring1, type:string, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc_a_n2.cstring2 SIMPLE [(small_alltypesorc1a_n2)small_alltypesorc1a_n2.FieldSchema(name:cstring2, type:string, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc_a_n2.ctimestamp1 SIMPLE [(small_alltypesorc1a_n2)small_alltypesorc1a_n2.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc_a_n2.ctimestamp2 SIMPLE [(small_alltypesorc1a_n2)small_alltypesorc1a_n2.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc_a_n2.ctinyint SIMPLE [(small_alltypesorc1a_n2)small_alltypesorc1a_n2.FieldSchema(name:ctinyint, type:tinyint, comment:null), ] -PREHOOK: query: desc formatted small_alltypesorc_a_n2 -PREHOOK: type: DESCTABLE -PREHOOK: Input: default@small_alltypesorc_a_n2 -POSTHOOK: query: desc formatted small_alltypesorc_a_n2 -POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@small_alltypesorc_a_n2 -# col_name data_type comment -ctinyint tinyint -csmallint smallint -cint int -cbigint bigint -cfloat float -cdouble double -cstring1 string -cstring2 string -ctimestamp1 timestamp -ctimestamp2 timestamp -cboolean1 boolean -cboolean2 boolean - -# Detailed Table Information -Database: default -#### A masked pattern was here #### -Retention: 0 -#### A masked pattern was here #### -Table Type: MANAGED_TABLE -Table Parameters: - COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"} - bucketing_version 2 - numFiles 4 - numRows 20 - rawDataSize 4468 - totalSize 5569 -#### A masked pattern was here #### - -# Storage Information -SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde -InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat -OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat -Compressed: No -Num Buckets: -1 -Bucket Columns: [] -Sort Columns: [] -Storage Desc Params: - serialization.format 1 -PREHOOK: query: drop table small_alltypesorc1a_n2 -PREHOOK: type: DROPTABLE -PREHOOK: Input: default@small_alltypesorc1a_n2 -PREHOOK: Output: default@small_alltypesorc1a_n2 -POSTHOOK: query: drop table small_alltypesorc1a_n2 -POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@small_alltypesorc1a_n2 -POSTHOOK: Output: default@small_alltypesorc1a_n2 -PREHOOK: query: drop table small_alltypesorc2a_n2 -PREHOOK: type: DROPTABLE -PREHOOK: Input: default@small_alltypesorc2a_n2 -PREHOOK: Output: default@small_alltypesorc2a_n2 -POSTHOOK: query: drop table small_alltypesorc2a_n2 -POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@small_alltypesorc2a_n2 -POSTHOOK: Output: default@small_alltypesorc2a_n2 -PREHOOK: query: drop table small_alltypesorc3a_n2 -PREHOOK: type: DROPTABLE -PREHOOK: Input: default@small_alltypesorc3a_n2 -PREHOOK: Output: default@small_alltypesorc3a_n2 -POSTHOOK: query: drop table small_alltypesorc3a_n2 -POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@small_alltypesorc3a_n2 -POSTHOOK: Output: default@small_alltypesorc3a_n2 -PREHOOK: query: drop table small_alltypesorc4a_n2 -PREHOOK: type: DROPTABLE -PREHOOK: Input: default@small_alltypesorc4a_n2 -PREHOOK: 
Output: default@small_alltypesorc4a_n2 -POSTHOOK: query: drop table small_alltypesorc4a_n2 -POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@small_alltypesorc4a_n2 -POSTHOOK: Output: default@small_alltypesorc4a_n2 -PREHOOK: query: drop table small_alltypesorc_a_n2 -PREHOOK: type: DROPTABLE -PREHOOK: Input: default@small_alltypesorc_a_n2 -PREHOOK: Output: default@small_alltypesorc_a_n2 -POSTHOOK: query: drop table small_alltypesorc_a_n2 -POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@small_alltypesorc_a_n2 -POSTHOOK: Output: default@small_alltypesorc_a_n2 -PREHOOK: query: create table small_alltypesorc1a_n2 as select * from alltypesorc where cint is not null and ctinyint is not null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5 -PREHOOK: type: CREATETABLE_AS_SELECT -PREHOOK: Input: default@alltypesorc -PREHOOK: Output: database:default -PREHOOK: Output: default@small_alltypesorc1a_n2 -POSTHOOK: query: create table small_alltypesorc1a_n2 as select * from alltypesorc where cint is not null and ctinyint is not null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5 -POSTHOOK: type: CREATETABLE_AS_SELECT -POSTHOOK: Input: default@alltypesorc -POSTHOOK: Output: database:default -POSTHOOK: Output: default@small_alltypesorc1a_n2 -POSTHOOK: Lineage: small_alltypesorc1a_n2.cbigint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc1a_n2.cboolean1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean1, type:boolean, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc1a_n2.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc1a_n2.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc1a_n2.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc1a_n2.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc1a_n2.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc1a_n2.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc1a_n2.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc1a_n2.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc1a_n2.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc1a_n2.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ] -PREHOOK: query: create table small_alltypesorc2a_n2 as select * from alltypesorc where cint is null and ctinyint is not null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5 -PREHOOK: type: CREATETABLE_AS_SELECT -PREHOOK: Input: default@alltypesorc -PREHOOK: Output: database:default -PREHOOK: Output: 
default@small_alltypesorc2a_n2 -POSTHOOK: query: create table small_alltypesorc2a_n2 as select * from alltypesorc where cint is null and ctinyint is not null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5 -POSTHOOK: type: CREATETABLE_AS_SELECT -POSTHOOK: Input: default@alltypesorc -POSTHOOK: Output: database:default -POSTHOOK: Output: default@small_alltypesorc2a_n2 -POSTHOOK: Lineage: small_alltypesorc2a_n2.cbigint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc2a_n2.cboolean1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean1, type:boolean, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc2a_n2.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc2a_n2.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc2a_n2.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc2a_n2.cint SIMPLE [] -POSTHOOK: Lineage: small_alltypesorc2a_n2.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc2a_n2.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc2a_n2.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc2a_n2.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc2a_n2.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc2a_n2.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ] -PREHOOK: query: create table small_alltypesorc3a_n2 as select * from alltypesorc where cint is not null and ctinyint is null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5 -PREHOOK: type: CREATETABLE_AS_SELECT -PREHOOK: Input: default@alltypesorc -PREHOOK: Output: database:default -PREHOOK: Output: default@small_alltypesorc3a_n2 -POSTHOOK: query: create table small_alltypesorc3a_n2 as select * from alltypesorc where cint is not null and ctinyint is null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5 -POSTHOOK: type: CREATETABLE_AS_SELECT -POSTHOOK: Input: default@alltypesorc -POSTHOOK: Output: database:default -POSTHOOK: Output: default@small_alltypesorc3a_n2 -POSTHOOK: Lineage: small_alltypesorc3a_n2.cbigint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc3a_n2.cboolean1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean1, type:boolean, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc3a_n2.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc3a_n2.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ] -POSTHOOK: Lineage: 
small_alltypesorc3a_n2.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc3a_n2.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc3a_n2.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc3a_n2.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc3a_n2.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc3a_n2.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc3a_n2.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc3a_n2.ctinyint SIMPLE [] -PREHOOK: query: create table small_alltypesorc4a_n2 as select * from alltypesorc where cint is null and ctinyint is null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5 -PREHOOK: type: CREATETABLE_AS_SELECT -PREHOOK: Input: default@alltypesorc -PREHOOK: Output: database:default -PREHOOK: Output: default@small_alltypesorc4a_n2 -POSTHOOK: query: create table small_alltypesorc4a_n2 as select * from alltypesorc where cint is null and ctinyint is null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5 -POSTHOOK: type: CREATETABLE_AS_SELECT -POSTHOOK: Input: default@alltypesorc -POSTHOOK: Output: database:default -POSTHOOK: Output: default@small_alltypesorc4a_n2 -POSTHOOK: Lineage: small_alltypesorc4a_n2.cbigint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc4a_n2.cboolean1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean1, type:boolean, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc4a_n2.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc4a_n2.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc4a_n2.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc4a_n2.cint SIMPLE [] -POSTHOOK: Lineage: small_alltypesorc4a_n2.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc4a_n2.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc4a_n2.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc4a_n2.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc4a_n2.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc4a_n2.ctinyint SIMPLE [] -PREHOOK: query: create table small_alltypesorc_a_n2 stored as orc as select * from -(select * from (select * 
from small_alltypesorc1a_n2) sq1 - union all - select * from (select * from small_alltypesorc2a_n2) sq2 - union all - select * from (select * from small_alltypesorc3a_n2) sq3 - union all - select * from (select * from small_alltypesorc4a_n2) sq4) q -PREHOOK: type: CREATETABLE_AS_SELECT -PREHOOK: Input: default@small_alltypesorc1a_n2 -PREHOOK: Input: default@small_alltypesorc2a_n2 -PREHOOK: Input: default@small_alltypesorc3a_n2 -PREHOOK: Input: default@small_alltypesorc4a_n2 -PREHOOK: Output: database:default -PREHOOK: Output: default@small_alltypesorc_a_n2 -POSTHOOK: query: create table small_alltypesorc_a_n2 stored as orc as select * from -(select * from (select * from small_alltypesorc1a_n2) sq1 - union all - select * from (select * from small_alltypesorc2a_n2) sq2 - union all - select * from (select * from small_alltypesorc3a_n2) sq3 - union all - select * from (select * from small_alltypesorc4a_n2) sq4) q -POSTHOOK: type: CREATETABLE_AS_SELECT -POSTHOOK: Input: default@small_alltypesorc1a_n2 -POSTHOOK: Input: default@small_alltypesorc2a_n2 -POSTHOOK: Input: default@small_alltypesorc3a_n2 -POSTHOOK: Input: default@small_alltypesorc4a_n2 -POSTHOOK: Output: database:default -POSTHOOK: Output: default@small_alltypesorc_a_n2 -POSTHOOK: Lineage: small_alltypesorc_a_n2.cbigint EXPRESSION [(small_alltypesorc1a_n2)small_alltypesorc1a_n2.FieldSchema(name:cbigint, type:bigint, comment:null), (small_alltypesorc2a_n2)small_alltypesorc2a_n2.FieldSchema(name:cbigint, type:bigint, comment:null), (small_alltypesorc3a_n2)small_alltypesorc3a_n2.FieldSchema(name:cbigint, type:bigint, comment:null), (small_alltypesorc4a_n2)small_alltypesorc4a_n2.FieldSchema(name:cbigint, type:bigint, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc_a_n2.cboolean1 EXPRESSION [(small_alltypesorc1a_n2)small_alltypesorc1a_n2.FieldSchema(name:cboolean1, type:boolean, comment:null), (small_alltypesorc2a_n2)small_alltypesorc2a_n2.FieldSchema(name:cboolean1, type:boolean, comment:null), (small_alltypesorc3a_n2)small_alltypesorc3a_n2.FieldSchema(name:cboolean1, type:boolean, comment:null), (small_alltypesorc4a_n2)small_alltypesorc4a_n2.FieldSchema(name:cboolean1, type:boolean, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc_a_n2.cboolean2 EXPRESSION [(small_alltypesorc1a_n2)small_alltypesorc1a_n2.FieldSchema(name:cboolean2, type:boolean, comment:null), (small_alltypesorc2a_n2)small_alltypesorc2a_n2.FieldSchema(name:cboolean2, type:boolean, comment:null), (small_alltypesorc3a_n2)small_alltypesorc3a_n2.FieldSchema(name:cboolean2, type:boolean, comment:null), (small_alltypesorc4a_n2)small_alltypesorc4a_n2.FieldSchema(name:cboolean2, type:boolean, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc_a_n2.cdouble EXPRESSION [(small_alltypesorc1a_n2)small_alltypesorc1a_n2.FieldSchema(name:cdouble, type:double, comment:null), (small_alltypesorc2a_n2)small_alltypesorc2a_n2.FieldSchema(name:cdouble, type:double, comment:null), (small_alltypesorc3a_n2)small_alltypesorc3a_n2.FieldSchema(name:cdouble, type:double, comment:null), (small_alltypesorc4a_n2)small_alltypesorc4a_n2.FieldSchema(name:cdouble, type:double, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc_a_n2.cfloat EXPRESSION [(small_alltypesorc1a_n2)small_alltypesorc1a_n2.FieldSchema(name:cfloat, type:float, comment:null), (small_alltypesorc2a_n2)small_alltypesorc2a_n2.FieldSchema(name:cfloat, type:float, comment:null), (small_alltypesorc3a_n2)small_alltypesorc3a_n2.FieldSchema(name:cfloat, type:float, comment:null), 
(small_alltypesorc4a_n2)small_alltypesorc4a_n2.FieldSchema(name:cfloat, type:float, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc_a_n2.cint EXPRESSION [(small_alltypesorc1a_n2)small_alltypesorc1a_n2.FieldSchema(name:cint, type:int, comment:null), (small_alltypesorc2a_n2)small_alltypesorc2a_n2.FieldSchema(name:cint, type:int, comment:null), (small_alltypesorc3a_n2)small_alltypesorc3a_n2.FieldSchema(name:cint, type:int, comment:null), (small_alltypesorc4a_n2)small_alltypesorc4a_n2.FieldSchema(name:cint, type:int, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc_a_n2.csmallint EXPRESSION [(small_alltypesorc1a_n2)small_alltypesorc1a_n2.FieldSchema(name:csmallint, type:smallint, comment:null), (small_alltypesorc2a_n2)small_alltypesorc2a_n2.FieldSchema(name:csmallint, type:smallint, comment:null), (small_alltypesorc3a_n2)small_alltypesorc3a_n2.FieldSchema(name:csmallint, type:smallint, comment:null), (small_alltypesorc4a_n2)small_alltypesorc4a_n2.FieldSchema(name:csmallint, type:smallint, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc_a_n2.cstring1 EXPRESSION [(small_alltypesorc1a_n2)small_alltypesorc1a_n2.FieldSchema(name:cstring1, type:string, comment:null), (small_alltypesorc2a_n2)small_alltypesorc2a_n2.FieldSchema(name:cstring1, type:string, comment:null), (small_alltypesorc3a_n2)small_alltypesorc3a_n2.FieldSchema(name:cstring1, type:string, comment:null), (small_alltypesorc4a_n2)small_alltypesorc4a_n2.FieldSchema(name:cstring1, type:string, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc_a_n2.cstring2 EXPRESSION [(small_alltypesorc1a_n2)small_alltypesorc1a_n2.FieldSchema(name:cstring2, type:string, comment:null), (small_alltypesorc2a_n2)small_alltypesorc2a_n2.FieldSchema(name:cstring2, type:string, comment:null), (small_alltypesorc3a_n2)small_alltypesorc3a_n2.FieldSchema(name:cstring2, type:string, comment:null), (small_alltypesorc4a_n2)small_alltypesorc4a_n2.FieldSchema(name:cstring2, type:string, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc_a_n2.ctimestamp1 EXPRESSION [(small_alltypesorc1a_n2)small_alltypesorc1a_n2.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), (small_alltypesorc2a_n2)small_alltypesorc2a_n2.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), (small_alltypesorc3a_n2)small_alltypesorc3a_n2.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), (small_alltypesorc4a_n2)small_alltypesorc4a_n2.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc_a_n2.ctimestamp2 EXPRESSION [(small_alltypesorc1a_n2)small_alltypesorc1a_n2.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), (small_alltypesorc2a_n2)small_alltypesorc2a_n2.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), (small_alltypesorc3a_n2)small_alltypesorc3a_n2.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), (small_alltypesorc4a_n2)small_alltypesorc4a_n2.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc_a_n2.ctinyint EXPRESSION [(small_alltypesorc1a_n2)small_alltypesorc1a_n2.FieldSchema(name:ctinyint, type:tinyint, comment:null), (small_alltypesorc2a_n2)small_alltypesorc2a_n2.FieldSchema(name:ctinyint, type:tinyint, comment:null), (small_alltypesorc3a_n2)small_alltypesorc3a_n2.FieldSchema(name:ctinyint, type:tinyint, comment:null), (small_alltypesorc4a_n2)small_alltypesorc4a_n2.FieldSchema(name:ctinyint, type:tinyint, comment:null), ] -PREHOOK: query: desc formatted small_alltypesorc_a_n2 -PREHOOK: type: DESCTABLE -PREHOOK: 
Input: default@small_alltypesorc_a_n2 -POSTHOOK: query: desc formatted small_alltypesorc_a_n2 -POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@small_alltypesorc_a_n2 -# col_name data_type comment -ctinyint tinyint -csmallint smallint -cint int -cbigint bigint -cfloat float -cdouble double -cstring1 string -cstring2 string -ctimestamp1 timestamp -ctimestamp2 timestamp -cboolean1 boolean -cboolean2 boolean - -# Detailed Table Information -Database: default -#### A masked pattern was here #### -Retention: 0 -#### A masked pattern was here #### -Table Type: MANAGED_TABLE -Table Parameters: - COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"} - bucketing_version 2 - numFiles 1 - numRows 15 - rawDataSize 3315 - totalSize 3318 -#### A masked pattern was here #### - -# Storage Information -SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde -InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat -OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat -Compressed: No -Num Buckets: -1 -Bucket Columns: [] -Sort Columns: [] -Storage Desc Params: - serialization.format 1 -PREHOOK: query: select 15,count(*) from small_alltypesorc_a_n2 -PREHOOK: type: QUERY -PREHOOK: Input: default@small_alltypesorc_a_n2 -#### A masked pattern was here #### -POSTHOOK: query: select 15,count(*) from small_alltypesorc_a_n2 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@small_alltypesorc_a_n2 -#### A masked pattern was here #### -15 15 -PREHOOK: query: ANALYZE TABLE small_alltypesorc_a_n2 COMPUTE STATISTICS -PREHOOK: type: QUERY -PREHOOK: Input: default@small_alltypesorc_a_n2 -PREHOOK: Output: default@small_alltypesorc_a_n2 -POSTHOOK: query: ANALYZE TABLE small_alltypesorc_a_n2 COMPUTE STATISTICS -POSTHOOK: type: QUERY -POSTHOOK: Input: default@small_alltypesorc_a_n2 -POSTHOOK: Output: default@small_alltypesorc_a_n2 -PREHOOK: query: select 15,count(*) from small_alltypesorc_a_n2 -PREHOOK: type: QUERY -PREHOOK: Input: default@small_alltypesorc_a_n2 -#### A masked pattern was here #### -POSTHOOK: query: select 15,count(*) from small_alltypesorc_a_n2 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@small_alltypesorc_a_n2 -#### A masked pattern was here #### -15 15 -PREHOOK: query: desc formatted small_alltypesorc_a_n2 -PREHOOK: type: DESCTABLE -PREHOOK: Input: default@small_alltypesorc_a_n2 -POSTHOOK: query: desc formatted small_alltypesorc_a_n2 -POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@small_alltypesorc_a_n2 -# col_name data_type comment -ctinyint tinyint -csmallint smallint -cint int -cbigint bigint -cfloat float -cdouble double -cstring1 string -cstring2 string -ctimestamp1 timestamp -ctimestamp2 timestamp -cboolean1 boolean -cboolean2 boolean - -# Detailed Table Information -Database: default -#### A masked pattern was here #### -Retention: 0 -#### A masked pattern was here #### -Table Type: MANAGED_TABLE -Table Parameters: - COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"} - bucketing_version 2 - numFiles 1 - numRows 15 - rawDataSize 3320 - totalSize 3318 -#### A masked pattern was here #### - -# Storage Information -SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde -InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat -OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat -Compressed: No -Num Buckets: -1 -Bucket Columns: [] -Sort Columns: [] -Storage Desc Params: - serialization.format 1 -PREHOOK: query: insert into table small_alltypesorc_a_n2 select * from small_alltypesorc1a_n2 -PREHOOK: type: QUERY -PREHOOK: Input: default@small_alltypesorc1a_n2 -PREHOOK: 
Output: default@small_alltypesorc_a_n2 -POSTHOOK: query: insert into table small_alltypesorc_a_n2 select * from small_alltypesorc1a_n2 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@small_alltypesorc1a_n2 -POSTHOOK: Output: default@small_alltypesorc_a_n2 -POSTHOOK: Lineage: small_alltypesorc_a_n2.cbigint SIMPLE [(small_alltypesorc1a_n2)small_alltypesorc1a_n2.FieldSchema(name:cbigint, type:bigint, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc_a_n2.cboolean1 SIMPLE [(small_alltypesorc1a_n2)small_alltypesorc1a_n2.FieldSchema(name:cboolean1, type:boolean, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc_a_n2.cboolean2 SIMPLE [(small_alltypesorc1a_n2)small_alltypesorc1a_n2.FieldSchema(name:cboolean2, type:boolean, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc_a_n2.cdouble SIMPLE [(small_alltypesorc1a_n2)small_alltypesorc1a_n2.FieldSchema(name:cdouble, type:double, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc_a_n2.cfloat SIMPLE [(small_alltypesorc1a_n2)small_alltypesorc1a_n2.FieldSchema(name:cfloat, type:float, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc_a_n2.cint SIMPLE [(small_alltypesorc1a_n2)small_alltypesorc1a_n2.FieldSchema(name:cint, type:int, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc_a_n2.csmallint SIMPLE [(small_alltypesorc1a_n2)small_alltypesorc1a_n2.FieldSchema(name:csmallint, type:smallint, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc_a_n2.cstring1 SIMPLE [(small_alltypesorc1a_n2)small_alltypesorc1a_n2.FieldSchema(name:cstring1, type:string, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc_a_n2.cstring2 SIMPLE [(small_alltypesorc1a_n2)small_alltypesorc1a_n2.FieldSchema(name:cstring2, type:string, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc_a_n2.ctimestamp1 SIMPLE [(small_alltypesorc1a_n2)small_alltypesorc1a_n2.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc_a_n2.ctimestamp2 SIMPLE [(small_alltypesorc1a_n2)small_alltypesorc1a_n2.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc_a_n2.ctinyint SIMPLE [(small_alltypesorc1a_n2)small_alltypesorc1a_n2.FieldSchema(name:ctinyint, type:tinyint, comment:null), ] -PREHOOK: query: desc formatted small_alltypesorc_a_n2 -PREHOOK: type: DESCTABLE -PREHOOK: Input: default@small_alltypesorc_a_n2 -POSTHOOK: query: desc formatted small_alltypesorc_a_n2 -POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@small_alltypesorc_a_n2 -# col_name data_type comment -ctinyint tinyint -csmallint smallint -cint int -cbigint bigint -cfloat float -cdouble double -cstring1 string -cstring2 string -ctimestamp1 timestamp -ctimestamp2 timestamp -cboolean1 boolean -cboolean2 boolean - -# Detailed Table Information -Database: default -#### A masked pattern was here #### -Retention: 0 -#### A masked pattern was here #### -Table Type: MANAGED_TABLE -Table Parameters: - COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"} - bucketing_version 2 - numFiles 2 - numRows 20 - rawDataSize 4305 - totalSize 4735 -#### A masked pattern was here #### - -# Storage Information -SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde -InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat -OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat -Compressed: No -Num Buckets: -1 -Bucket Columns: [] -Sort Columns: [] -Storage Desc Params: - serialization.format 1 diff --git ql/src/test/results/clientpositive/llap/union_stats.q.out ql/src/test/results/clientpositive/llap/union_stats.q.out 
deleted file mode 100644 index f9524f58d1..0000000000 --- ql/src/test/results/clientpositive/llap/union_stats.q.out +++ /dev/null @@ -1,627 +0,0 @@ -PREHOOK: query: explain extended create table t as select * from src union all select * from src -PREHOOK: type: CREATETABLE_AS_SELECT -POSTHOOK: query: explain extended create table t as select * from src union all select * from src -POSTHOOK: type: CREATETABLE_AS_SELECT -OPTIMIZED SQL: SELECT `key`, `value` -FROM `default`.`src` -UNION ALL -SELECT `key`, `value` -FROM `default`.`src` -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-2 depends on stages: Stage-1 - Stage-4 depends on stages: Stage-2, Stage-0 - Stage-3 depends on stages: Stage-4 - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-1 - Tez -#### A masked pattern was here #### - Edges: - Map 1 <- Union 2 (CONTAINS) - Map 3 <- Union 2 (CONTAINS) -#### A masked pattern was here #### - Vertices: - Map 1 - Map Operator Tree: - TableScan - alias: src - Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE - GatherStats: false - Select Operator - expressions: key (type: string), value (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - GlobalTableId: 1 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - Statistics: Num rows: 1000 Data size: 178000 Basic stats: COMPLETE Column stats: COMPLETE -#### A masked pattern was here #### - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - columns key,value - columns.types string:string - name default.t - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.t - TotalFiles: 1 - GatherStats: true - MultiFileSpray: false - Execution mode: vectorized, llap - LLAP IO: no inputs - Path -> Alias: -#### A masked pattern was here #### - Path -> Partition: -#### A masked pattern was here #### - Partition - base file name: src - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}} - bucket_count -1 - bucketing_version 2 - column.name.delimiter , - columns key,value - columns.comments 'default','default' - columns.types string:string -#### A masked pattern was here #### - name default.src - numFiles 1 - numRows 500 - rawDataSize 5312 - serialization.ddl struct src { string key, string value} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 5812 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}} - bucket_count -1 - bucketing_version 2 - column.name.delimiter , - columns key,value - columns.comments 'default','default' - columns.types string:string -#### A masked pattern was here #### - name default.src - numFiles 1 - numRows 500 - rawDataSize 5312 - serialization.ddl struct src { string key, string value} - 
serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 5812 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.src - name: default.src - Truncated Path -> Alias: - /src [src] - Map 3 - Map Operator Tree: - TableScan - alias: src - Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE - GatherStats: false - Select Operator - expressions: key (type: string), value (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - GlobalTableId: 1 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - Statistics: Num rows: 1000 Data size: 178000 Basic stats: COMPLETE Column stats: COMPLETE -#### A masked pattern was here #### - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - columns key,value - columns.types string:string - name default.t - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.t - TotalFiles: 1 - GatherStats: true - MultiFileSpray: false - Execution mode: vectorized, llap - LLAP IO: no inputs - Path -> Alias: -#### A masked pattern was here #### - Path -> Partition: -#### A masked pattern was here #### - Partition - base file name: src - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}} - bucket_count -1 - bucketing_version 2 - column.name.delimiter , - columns key,value - columns.comments 'default','default' - columns.types string:string -#### A masked pattern was here #### - name default.src - numFiles 1 - numRows 500 - rawDataSize 5312 - serialization.ddl struct src { string key, string value} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 5812 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}} - bucket_count -1 - bucketing_version 2 - column.name.delimiter , - columns key,value - columns.comments 'default','default' - columns.types string:string -#### A masked pattern was here #### - name default.src - numFiles 1 - numRows 500 - rawDataSize 5312 - serialization.ddl struct src { string key, string value} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 5812 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.src - name: default.src - Truncated Path -> Alias: - /src [src] - Union 2 - Vertex: Union 2 - - Stage: Stage-2 - Dependency Collection - - Stage: Stage-4 - Create Table Operator: - Create Table - columns: key string, value string - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat - serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.t - 
- Stage: Stage-3 - Stats Work - Basic Stats Work: -#### A masked pattern was here #### - - Stage: Stage-0 - Move Operator - files: - hdfs directory: true -#### A masked pattern was here #### - -PREHOOK: query: create table t as select * from src union all select * from src -PREHOOK: type: CREATETABLE_AS_SELECT -PREHOOK: Input: default@src -PREHOOK: Output: database:default -PREHOOK: Output: default@t -POSTHOOK: query: create table t as select * from src union all select * from src -POSTHOOK: type: CREATETABLE_AS_SELECT -POSTHOOK: Input: default@src -POSTHOOK: Output: database:default -POSTHOOK: Output: default@t -POSTHOOK: Lineage: t.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: t.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: select count(1) from t -PREHOOK: type: QUERY -PREHOOK: Input: default@t -#### A masked pattern was here #### -POSTHOOK: query: select count(1) from t -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t -#### A masked pattern was here #### -1000 -PREHOOK: query: desc formatted t -PREHOOK: type: DESCTABLE -PREHOOK: Input: default@t -POSTHOOK: query: desc formatted t -POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@t -# col_name data_type comment -key string -value string - -# Detailed Table Information -Database: default -#### A masked pattern was here #### -Retention: 0 -#### A masked pattern was here #### -Table Type: MANAGED_TABLE -Table Parameters: - COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"} - bucketing_version 2 - numFiles 2 - numRows 1000 - rawDataSize 10624 - totalSize 11624 -#### A masked pattern was here #### - -# Storage Information -SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -InputFormat: org.apache.hadoop.mapred.TextInputFormat -OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat -Compressed: No -Num Buckets: -1 -Bucket Columns: [] -Sort Columns: [] -Storage Desc Params: - serialization.format 1 -PREHOOK: query: create table tt as select * from t union all select * from src -PREHOOK: type: CREATETABLE_AS_SELECT -PREHOOK: Input: default@src -PREHOOK: Input: default@t -PREHOOK: Output: database:default -PREHOOK: Output: default@tt -POSTHOOK: query: create table tt as select * from t union all select * from src -POSTHOOK: type: CREATETABLE_AS_SELECT -POSTHOOK: Input: default@src -POSTHOOK: Input: default@t -POSTHOOK: Output: database:default -POSTHOOK: Output: default@tt -POSTHOOK: Lineage: tt.key EXPRESSION [(t)t.FieldSchema(name:key, type:string, comment:null), (src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: tt.value EXPRESSION [(t)t.FieldSchema(name:value, type:string, comment:null), (src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: desc formatted tt -PREHOOK: type: DESCTABLE -PREHOOK: Input: default@tt -POSTHOOK: query: desc formatted tt -POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@tt -# col_name data_type comment -key string -value string - -# Detailed Table Information -Database: default -#### A masked pattern was here #### -Retention: 0 -#### A masked pattern was here #### -Table Type: MANAGED_TABLE -Table Parameters: - COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"} - bucketing_version 2 - numFiles 2 - numRows 1500 - rawDataSize 15936 - totalSize 17436 -#### A masked pattern was here #### - -# Storage Information -SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -InputFormat: 
org.apache.hadoop.mapred.TextInputFormat -OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat -Compressed: No -Num Buckets: -1 -Bucket Columns: [] -Sort Columns: [] -Storage Desc Params: - serialization.format 1 -PREHOOK: query: drop table tt -PREHOOK: type: DROPTABLE -PREHOOK: Input: default@tt -PREHOOK: Output: default@tt -POSTHOOK: query: drop table tt -POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@tt -POSTHOOK: Output: default@tt -PREHOOK: query: create table tt as select * from src union all select * from t -PREHOOK: type: CREATETABLE_AS_SELECT -PREHOOK: Input: default@src -PREHOOK: Input: default@t -PREHOOK: Output: database:default -PREHOOK: Output: default@tt -POSTHOOK: query: create table tt as select * from src union all select * from t -POSTHOOK: type: CREATETABLE_AS_SELECT -POSTHOOK: Input: default@src -POSTHOOK: Input: default@t -POSTHOOK: Output: database:default -POSTHOOK: Output: default@tt -POSTHOOK: Lineage: tt.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (t)t.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: tt.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), (t)t.FieldSchema(name:value, type:string, comment:null), ] -PREHOOK: query: desc formatted tt -PREHOOK: type: DESCTABLE -PREHOOK: Input: default@tt -POSTHOOK: query: desc formatted tt -POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@tt -# col_name data_type comment -key string -value string - -# Detailed Table Information -Database: default -#### A masked pattern was here #### -Retention: 0 -#### A masked pattern was here #### -Table Type: MANAGED_TABLE -Table Parameters: - COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"} - bucketing_version 2 - numFiles 2 - numRows 1500 - rawDataSize 15936 - totalSize 17436 -#### A masked pattern was here #### - -# Storage Information -SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -InputFormat: org.apache.hadoop.mapred.TextInputFormat -OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat -Compressed: No -Num Buckets: -1 -Bucket Columns: [] -Sort Columns: [] -Storage Desc Params: - serialization.format 1 -PREHOOK: query: create table t1 like src -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@t1 -POSTHOOK: query: create table t1 like src -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@t1 -PREHOOK: query: create table t2 like src -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@t2 -POSTHOOK: query: create table t2 like src -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@t2 -PREHOOK: query: create table t3 like src -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@t3 -POSTHOOK: query: create table t3 like src -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@t3 -PREHOOK: query: explain from (select * from src union all select * from src)s -insert overwrite table t1 select * -insert overwrite table t2 select * -insert overwrite table t3 select * -PREHOOK: type: QUERY -POSTHOOK: query: explain from (select * from src union all select * from src)s -insert overwrite table t1 select * -insert overwrite table t2 select * -insert overwrite table t3 select * -POSTHOOK: type: QUERY -Plan optimized by CBO. 
- -Vertex dependency in root stage -Map 1 <- Union 2 (CONTAINS) -Map 6 <- Union 2 (CONTAINS) -Reducer 3 <- Union 2 (CUSTOM_SIMPLE_EDGE) -Reducer 4 <- Union 2 (CUSTOM_SIMPLE_EDGE) -Reducer 5 <- Union 2 (CUSTOM_SIMPLE_EDGE) - -Stage-5 - Stats Work{} - Stage-0 - Move Operator - table:{"name:":"default.t1"} - Stage-4 - Dependency Collection{} - Stage-3 - Reducer 3 llap - File Output Operator [FS_14] - Group By Operator [GBY_12] (rows=1 width=880) - Output:["_col0","_col1"],aggregations:["compute_stats(VALUE._col0)","compute_stats(VALUE._col1)"] - <-Union 2 [CUSTOM_SIMPLE_EDGE] - <-Map 1 [CONTAINS] llap - File Output Operator [FS_36] - table:{"name:":"default.t1"} - Select Operator [SEL_34] (rows=500 width=178) - Output:["_col0","_col1"] - TableScan [TS_33] (rows=500 width=178) - Output:["key","value"] - Reduce Output Operator [RS_45] - Group By Operator [GBY_42] (rows=1 width=880) - Output:["_col0","_col1"],aggregations:["compute_stats(key, 'hll')","compute_stats(value, 'hll')"] - Select Operator [SEL_37] (rows=1000 width=178) - Output:["key","value"] - Please refer to the previous Select Operator [SEL_34] - File Output Operator [FS_38] - table:{"name:":"default.t2"} - Please refer to the previous Select Operator [SEL_34] - Reduce Output Operator [RS_46] - Group By Operator [GBY_43] (rows=1 width=880) - Output:["_col0","_col1"],aggregations:["compute_stats(key, 'hll')","compute_stats(value, 'hll')"] - Select Operator [SEL_39] (rows=1000 width=178) - Output:["key","value"] - Please refer to the previous Select Operator [SEL_34] - File Output Operator [FS_40] - table:{"name:":"default.t3"} - Please refer to the previous Select Operator [SEL_34] - Reduce Output Operator [RS_47] - Group By Operator [GBY_44] (rows=1 width=880) - Output:["_col0","_col1"],aggregations:["compute_stats(key, 'hll')","compute_stats(value, 'hll')"] - Select Operator [SEL_41] (rows=1000 width=178) - Output:["key","value"] - Please refer to the previous Select Operator [SEL_34] - <-Map 6 [CONTAINS] llap - File Output Operator [FS_51] - table:{"name:":"default.t1"} - Select Operator [SEL_49] (rows=500 width=178) - Output:["_col0","_col1"] - TableScan [TS_48] (rows=500 width=178) - Output:["key","value"] - Reduce Output Operator [RS_60] - Group By Operator [GBY_57] (rows=1 width=880) - Output:["_col0","_col1"],aggregations:["compute_stats(key, 'hll')","compute_stats(value, 'hll')"] - Select Operator [SEL_52] (rows=1000 width=178) - Output:["key","value"] - Please refer to the previous Select Operator [SEL_49] - File Output Operator [FS_53] - table:{"name:":"default.t2"} - Please refer to the previous Select Operator [SEL_49] - Reduce Output Operator [RS_61] - Group By Operator [GBY_58] (rows=1 width=880) - Output:["_col0","_col1"],aggregations:["compute_stats(key, 'hll')","compute_stats(value, 'hll')"] - Select Operator [SEL_54] (rows=1000 width=178) - Output:["key","value"] - Please refer to the previous Select Operator [SEL_49] - File Output Operator [FS_55] - table:{"name:":"default.t3"} - Please refer to the previous Select Operator [SEL_49] - Reduce Output Operator [RS_62] - Group By Operator [GBY_59] (rows=1 width=880) - Output:["_col0","_col1"],aggregations:["compute_stats(key, 'hll')","compute_stats(value, 'hll')"] - Select Operator [SEL_56] (rows=1000 width=178) - Output:["key","value"] - Please refer to the previous Select Operator [SEL_49] - Reducer 4 llap - File Output Operator [FS_23] - Group By Operator [GBY_21] (rows=1 width=880) - 
Output:["_col0","_col1"],aggregations:["compute_stats(VALUE._col0)","compute_stats(VALUE._col1)"] - <- Please refer to the previous Union 2 [CUSTOM_SIMPLE_EDGE] - Reducer 5 llap - File Output Operator [FS_32] - Group By Operator [GBY_30] (rows=1 width=880) - Output:["_col0","_col1"],aggregations:["compute_stats(VALUE._col0)","compute_stats(VALUE._col1)"] - <- Please refer to the previous Union 2 [CUSTOM_SIMPLE_EDGE] -Stage-6 - Stats Work{} - Stage-1 - Move Operator - table:{"name:":"default.t2"} - Please refer to the previous Stage-4 -Stage-7 - Stats Work{} - Stage-2 - Move Operator - table:{"name:":"default.t3"} - Please refer to the previous Stage-4 - -PREHOOK: query: from (select * from src union all select * from src)s -insert overwrite table t1 select * -insert overwrite table t2 select * -insert overwrite table t3 select * -PREHOOK: type: QUERY -PREHOOK: Input: default@src -PREHOOK: Output: default@t1 -PREHOOK: Output: default@t2 -PREHOOK: Output: default@t3 -POSTHOOK: query: from (select * from src union all select * from src)s -insert overwrite table t1 select * -insert overwrite table t2 select * -insert overwrite table t3 select * -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src -POSTHOOK: Output: default@t1 -POSTHOOK: Output: default@t2 -POSTHOOK: Output: default@t3 -POSTHOOK: Lineage: t1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: t1.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: t2.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: t2.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: t3.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: t3.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: desc formatted t1 -PREHOOK: type: DESCTABLE -PREHOOK: Input: default@t1 -POSTHOOK: query: desc formatted t1 -POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@t1 -# col_name data_type comment -key string default -value string default - -# Detailed Table Information -Database: default -#### A masked pattern was here #### -Retention: 0 -#### A masked pattern was here #### -Table Type: MANAGED_TABLE -Table Parameters: - COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}} - numFiles 2 - numRows 1000 - rawDataSize 10624 - totalSize 11624 -#### A masked pattern was here #### - -# Storage Information -SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -InputFormat: org.apache.hadoop.mapred.TextInputFormat -OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat -Compressed: No -Num Buckets: -1 -Bucket Columns: [] -Sort Columns: [] -Storage Desc Params: - serialization.format 1 -PREHOOK: query: desc formatted t2 -PREHOOK: type: DESCTABLE -PREHOOK: Input: default@t2 -POSTHOOK: query: desc formatted t2 -POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@t2 -# col_name data_type comment -key string default -value string default - -# Detailed Table Information -Database: default -#### A masked pattern was here #### -Retention: 0 -#### A masked pattern was here #### -Table Type: MANAGED_TABLE -Table Parameters: - COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}} - numFiles 2 - numRows 1000 - rawDataSize 10624 - totalSize 11624 -#### A masked pattern was 
here #### - -# Storage Information -SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -InputFormat: org.apache.hadoop.mapred.TextInputFormat -OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat -Compressed: No -Num Buckets: -1 -Bucket Columns: [] -Sort Columns: [] -Storage Desc Params: - serialization.format 1 -PREHOOK: query: select count(1) from t1 -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -#### A masked pattern was here #### -POSTHOOK: query: select count(1) from t1 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -#### A masked pattern was here #### -1000 diff --git ql/src/test/results/clientpositive/loadpart_err.q.out ql/src/test/results/clientpositive/loadpart_err.q.out deleted file mode 100644 index 7e035ff0c4..0000000000 --- ql/src/test/results/clientpositive/loadpart_err.q.out +++ /dev/null @@ -1,28 +0,0 @@ -PREHOOK: query: CREATE TABLE loadpart1(a STRING, b STRING) PARTITIONED BY (ds STRING) -PREHOOK: type: CREATETABLE -POSTHOOK: query: CREATE TABLE loadpart1(a STRING, b STRING) PARTITIONED BY (ds STRING) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: default@loadpart1 -PREHOOK: query: INSERT OVERWRITE TABLE loadpart1 PARTITION (ds='2009-01-01') -SELECT TRANSFORM(src.key, src.value) USING '../../data/scripts/error_script' AS (tkey, tvalue) -FROM src -PREHOOK: type: QUERY -PREHOOK: Input: default@src -PREHOOK: Output: default@loadpart1@ds=2009-01-01 -FAILED: Execution Error, return code 2 from org.apache.hadoop.hive.ql.exec.mr.MapRedTask -PREHOOK: query: DESCRIBE loadpart1 -PREHOOK: type: DESCTABLE -POSTHOOK: query: DESCRIBE loadpart1 -POSTHOOK: type: DESCTABLE -a string -b string -ds string -PREHOOK: query: SHOW PARTITIONS loadpart1 -PREHOOK: type: SHOWPARTITIONS -POSTHOOK: query: SHOW PARTITIONS loadpart1 -POSTHOOK: type: SHOWPARTITIONS -FAILED: Error in semantic analysis: line 3:23 Invalid Path '../data1/files/kv1.txt': No files matching path file:/mnt/vol/devrs004.snc1/jssarma/projects/hive_trunk/data1/files/kv1.txt -PREHOOK: query: SHOW PARTITIONS loadpart1 -PREHOOK: type: SHOWPARTITIONS -POSTHOOK: query: SHOW PARTITIONS loadpart1 -POSTHOOK: type: SHOWPARTITIONS diff --git ql/src/test/results/clientpositive/perf/tez/cbo_query44.q.out ql/src/test/results/clientpositive/perf/tez/cbo_query44.q.out deleted file mode 100644 index 73d4213656..0000000000 --- ql/src/test/results/clientpositive/perf/tez/cbo_query44.q.out +++ /dev/null @@ -1,115 +0,0 @@ -Warning: Shuffle Join MERGEJOIN[103][tables = [$hdt$_1, $hdt$_2]] in Stage 'Reducer 8' is a cross product -PREHOOK: query: explain cbo -select asceding.rnk, i1.i_product_name best_performing, i2.i_product_name worst_performing -from(select * - from (select item_sk,rank() over (order by rank_col asc) rnk - from (select ss_item_sk item_sk,avg(ss_net_profit) rank_col - from store_sales ss1 - where ss_store_sk = 410 - group by ss_item_sk - having avg(ss_net_profit) > 0.9*(select avg(ss_net_profit) rank_col - from store_sales - where ss_store_sk = 410 - and ss_hdemo_sk is null - group by ss_store_sk))V1)V11 - where rnk < 11) asceding, - (select * - from (select item_sk,rank() over (order by rank_col desc) rnk - from (select ss_item_sk item_sk,avg(ss_net_profit) rank_col - from store_sales ss1 - where ss_store_sk = 410 - group by ss_item_sk - having avg(ss_net_profit) > 0.9*(select avg(ss_net_profit) rank_col - from store_sales - where ss_store_sk = 410 - and ss_hdemo_sk is null - group by ss_store_sk))V2)V21 - where rnk < 11) descending, -item i1, -item i2 -where asceding.rnk = descending.rnk 
- and i1.i_item_sk=asceding.item_sk - and i2.i_item_sk=descending.item_sk -order by asceding.rnk -limit 100 -PREHOOK: type: QUERY -PREHOOK: Input: default@item -PREHOOK: Input: default@store_sales -PREHOOK: Output: hdfs://### HDFS PATH ### -POSTHOOK: query: explain cbo -select asceding.rnk, i1.i_product_name best_performing, i2.i_product_name worst_performing -from(select * - from (select item_sk,rank() over (order by rank_col asc) rnk - from (select ss_item_sk item_sk,avg(ss_net_profit) rank_col - from store_sales ss1 - where ss_store_sk = 410 - group by ss_item_sk - having avg(ss_net_profit) > 0.9*(select avg(ss_net_profit) rank_col - from store_sales - where ss_store_sk = 410 - and ss_hdemo_sk is null - group by ss_store_sk))V1)V11 - where rnk < 11) asceding, - (select * - from (select item_sk,rank() over (order by rank_col desc) rnk - from (select ss_item_sk item_sk,avg(ss_net_profit) rank_col - from store_sales ss1 - where ss_store_sk = 410 - group by ss_item_sk - having avg(ss_net_profit) > 0.9*(select avg(ss_net_profit) rank_col - from store_sales - where ss_store_sk = 410 - and ss_hdemo_sk is null - group by ss_store_sk))V2)V21 - where rnk < 11) descending, -item i1, -item i2 -where asceding.rnk = descending.rnk - and i1.i_item_sk=asceding.item_sk - and i2.i_item_sk=descending.item_sk -order by asceding.rnk -limit 100 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@item -POSTHOOK: Input: default@store_sales -POSTHOOK: Output: hdfs://### HDFS PATH ### -CBO PLAN: -HiveSortLimit(sort0=[$0], dir0=[ASC], fetch=[100]) - HiveProject(rnk=[$3], best_performing=[$1], worst_performing=[$5]) - HiveJoin(condition=[=($3, $7)], joinType=[inner], algorithm=[none], cost=[not available]) - HiveJoin(condition=[=($0, $2)], joinType=[inner], algorithm=[none], cost=[not available]) - HiveProject(i_item_sk=[$0], i_product_name=[$21]) - HiveFilter(condition=[IS NOT NULL($0)]) - HiveTableScan(table=[[default, item]], table:alias=[i1]) - HiveProject(item_sk=[$0], rank_window_0=[$1]) - HiveFilter(condition=[AND(<($1, 11), IS NOT NULL($0))]) - HiveProject(item_sk=[$0], rank_window_0=[rank() OVER (PARTITION BY 0 ORDER BY $1 NULLS FIRST ROWS BETWEEN 2147483647 FOLLOWING AND 2147483647 PRECEDING)]) - HiveJoin(condition=[>($1, *(0.9, $2))], joinType=[inner], algorithm=[none], cost=[not available]) - HiveProject($f0=[$0], $f1=[/($1, $2)]) - HiveAggregate(group=[{2}], agg#0=[sum($22)], agg#1=[count($22)]) - HiveFilter(condition=[=($7, 410)]) - HiveTableScan(table=[[default, store_sales]], table:alias=[ss1]) - HiveProject($f1=[/($1, $2)]) - HiveAggregate(group=[{0}], agg#0=[sum($1)], agg#1=[count($1)]) - HiveProject($f0=[true], $f1=[$22]) - HiveFilter(condition=[AND(=($7, 410), IS NULL($5))]) - HiveTableScan(table=[[default, store_sales]], table:alias=[store_sales]) - HiveProject(i_item_sk=[$0], i_product_name=[$1], item_sk=[$2], rank_window_0=[$3]) - HiveJoin(condition=[=($0, $2)], joinType=[inner], algorithm=[none], cost=[not available]) - HiveProject(i_item_sk=[$0], i_product_name=[$21]) - HiveFilter(condition=[IS NOT NULL($0)]) - HiveTableScan(table=[[default, item]], table:alias=[i2]) - HiveProject(item_sk=[$0], rank_window_0=[$1]) - HiveFilter(condition=[AND(<($1, 11), IS NOT NULL($0))]) - HiveProject(item_sk=[$0], rank_window_0=[rank() OVER (PARTITION BY 0 ORDER BY $1 DESC NULLS LAST ROWS BETWEEN 2147483647 FOLLOWING AND 2147483647 PRECEDING)]) - HiveJoin(condition=[>($1, *(0.9, $2))], joinType=[inner], algorithm=[none], cost=[not available]) - HiveProject($f0=[$0], $f1=[/($1, $2)]) - 
HiveAggregate(group=[{2}], agg#0=[sum($22)], agg#1=[count($22)]) - HiveFilter(condition=[=($7, 410)]) - HiveTableScan(table=[[default, store_sales]], table:alias=[ss1]) - HiveProject($f1=[/($1, $2)]) - HiveAggregate(group=[{0}], agg#0=[sum($1)], agg#1=[count($1)]) - HiveProject($f0=[true], $f1=[$22]) - HiveFilter(condition=[AND(=($7, 410), IS NULL($5))]) - HiveTableScan(table=[[default, store_sales]], table:alias=[store_sales]) - diff --git ql/src/test/results/clientpositive/perf/tez/cbo_query45.q.out ql/src/test/results/clientpositive/perf/tez/cbo_query45.q.out deleted file mode 100644 index 986dae94f5..0000000000 --- ql/src/test/results/clientpositive/perf/tez/cbo_query45.q.out +++ /dev/null @@ -1,89 +0,0 @@ -Warning: Shuffle Join MERGEJOIN[133][tables = [$hdt$_0, $hdt$_1, $hdt$_2, $hdt$_3]] in Stage 'Reducer 4' is a cross product -PREHOOK: query: explain cbo -select ca_zip, ca_county, sum(ws_sales_price) - from web_sales, customer, customer_address, date_dim, item - where ws_bill_customer_sk = c_customer_sk - and c_current_addr_sk = ca_address_sk - and ws_item_sk = i_item_sk - and ( substr(ca_zip,1,5) in ('85669', '86197','88274','83405','86475', '85392', '85460', '80348', '81792') - or - i_item_id in (select i_item_id - from item - where i_item_sk in (2, 3, 5, 7, 11, 13, 17, 19, 23, 29) - ) - ) - and ws_sold_date_sk = d_date_sk - and d_qoy = 2 and d_year = 2000 - group by ca_zip, ca_county - order by ca_zip, ca_county - limit 100 -PREHOOK: type: QUERY -PREHOOK: Input: default@customer -PREHOOK: Input: default@customer_address -PREHOOK: Input: default@date_dim -PREHOOK: Input: default@item -PREHOOK: Input: default@web_sales -PREHOOK: Output: hdfs://### HDFS PATH ### -POSTHOOK: query: explain cbo -select ca_zip, ca_county, sum(ws_sales_price) - from web_sales, customer, customer_address, date_dim, item - where ws_bill_customer_sk = c_customer_sk - and c_current_addr_sk = ca_address_sk - and ws_item_sk = i_item_sk - and ( substr(ca_zip,1,5) in ('85669', '86197','88274','83405','86475', '85392', '85460', '80348', '81792') - or - i_item_id in (select i_item_id - from item - where i_item_sk in (2, 3, 5, 7, 11, 13, 17, 19, 23, 29) - ) - ) - and ws_sold_date_sk = d_date_sk - and d_qoy = 2 and d_year = 2000 - group by ca_zip, ca_county - order by ca_zip, ca_county - limit 100 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@customer -POSTHOOK: Input: default@customer_address -POSTHOOK: Input: default@date_dim -POSTHOOK: Input: default@item -POSTHOOK: Input: default@web_sales -POSTHOOK: Output: hdfs://### HDFS PATH ### -CBO PLAN: -HiveSortLimit(sort0=[$0], sort1=[$1], dir0=[ASC], dir1=[ASC], fetch=[100]) - HiveProject(ca_zip=[$1], ca_county=[$0], $f2=[$2]) - HiveAggregate(group=[{7, 8}], agg#0=[sum($3)]) - HiveFilter(condition=[OR(IN(substr($8, 1, 5), _UTF-16LE'85669', _UTF-16LE'86197', _UTF-16LE'88274', _UTF-16LE'83405', _UTF-16LE'86475', _UTF-16LE'85392', _UTF-16LE'85460', _UTF-16LE'80348', _UTF-16LE'81792'), CASE(=($14, 0), false, IS NOT NULL($17), true, IS NULL($13), null, <($15, $14), null, false))]) - HiveProject(ws_sold_date_sk=[$9], ws_item_sk=[$10], ws_bill_customer_sk=[$11], ws_sales_price=[$12], c_customer_sk=[$0], c_current_addr_sk=[$1], ca_address_sk=[$2], ca_county=[$3], ca_zip=[$4], d_date_sk=[$13], d_year=[$14], d_qoy=[$15], i_item_sk=[$5], i_item_id=[$6], c=[$16], ck=[$17], i_item_id0=[$7], i1142=[$8]) - HiveJoin(condition=[true], joinType=[inner], algorithm=[none], cost=[not available]) - HiveJoin(condition=[=($11, $0)], joinType=[inner], algorithm=[none], cost=[not available]) 
- HiveJoin(condition=[=($1, $2)], joinType=[inner], algorithm=[none], cost=[not available]) - HiveProject(c_customer_sk=[$0], c_current_addr_sk=[$4]) - HiveFilter(condition=[AND(IS NOT NULL($0), IS NOT NULL($4))]) - HiveTableScan(table=[[default, customer]], table:alias=[customer]) - HiveProject(ca_address_sk=[$0], ca_county=[$7], ca_zip=[$9]) - HiveFilter(condition=[IS NOT NULL($0)]) - HiveTableScan(table=[[default, customer_address]], table:alias=[customer_address]) - HiveProject(i_item_sk=[$0], i_item_id=[$1], i_item_id0=[$2], i1142=[$3], ws_sold_date_sk=[$4], ws_item_sk=[$5], ws_bill_customer_sk=[$6], ws_sales_price=[$7], d_date_sk=[$8], d_year=[$9], d_qoy=[$10]) - HiveJoin(condition=[=($5, $0)], joinType=[inner], algorithm=[none], cost=[not available]) - HiveJoin(condition=[=($1, $2)], joinType=[left], algorithm=[none], cost=[not available]) - HiveProject(i_item_sk=[$0], i_item_id=[$1]) - HiveFilter(condition=[IS NOT NULL($0)]) - HiveTableScan(table=[[default, item]], table:alias=[item]) - HiveProject(i_item_id=[$0], i1142=[true]) - HiveAggregate(group=[{1}]) - HiveFilter(condition=[IN($0, 2, 3, 5, 7, 11, 13, 17, 19, 23, 29)]) - HiveTableScan(table=[[default, item]], table:alias=[item]) - HiveProject(ws_sold_date_sk=[$0], ws_item_sk=[$1], ws_bill_customer_sk=[$2], ws_sales_price=[$3], d_date_sk=[$4], d_year=[$5], d_qoy=[$6]) - HiveJoin(condition=[=($0, $4)], joinType=[inner], algorithm=[none], cost=[not available]) - HiveProject(ws_sold_date_sk=[$0], ws_item_sk=[$3], ws_bill_customer_sk=[$4], ws_sales_price=[$21]) - HiveFilter(condition=[AND(IS NOT NULL($4), IS NOT NULL($0), IS NOT NULL($3))]) - HiveTableScan(table=[[default, web_sales]], table:alias=[web_sales]) - HiveProject(d_date_sk=[$0], d_year=[CAST(2000):INTEGER], d_qoy=[CAST(2):INTEGER]) - HiveFilter(condition=[AND(=($10, 2), =($6, 2000), IS NOT NULL($0))]) - HiveTableScan(table=[[default, date_dim]], table:alias=[date_dim]) - HiveProject(c=[$0], ck=[$1]) - HiveAggregate(group=[{}], c=[COUNT()], ck=[COUNT($1)]) - HiveFilter(condition=[IN($0, 2, 3, 5, 7, 11, 13, 17, 19, 23, 29)]) - HiveTableScan(table=[[default, item]], table:alias=[item]) - diff --git ql/src/test/results/clientpositive/perf/tez/cbo_query67.q.out ql/src/test/results/clientpositive/perf/tez/cbo_query67.q.out deleted file mode 100644 index fbe6779bc0..0000000000 --- ql/src/test/results/clientpositive/perf/tez/cbo_query67.q.out +++ /dev/null @@ -1,120 +0,0 @@ -PREHOOK: query: explain cbo -select * -from (select i_category - ,i_class - ,i_brand - ,i_product_name - ,d_year - ,d_qoy - ,d_moy - ,s_store_id - ,sumsales - ,rank() over (partition by i_category order by sumsales desc) rk - from (select i_category - ,i_class - ,i_brand - ,i_product_name - ,d_year - ,d_qoy - ,d_moy - ,s_store_id - ,sum(coalesce(ss_sales_price*ss_quantity,0)) sumsales - from store_sales - ,date_dim - ,store - ,item - where ss_sold_date_sk=d_date_sk - and ss_item_sk=i_item_sk - and ss_store_sk = s_store_sk - and d_month_seq between 1212 and 1212+11 - group by rollup(i_category, i_class, i_brand, i_product_name, d_year, d_qoy, d_moy,s_store_id))dw1) dw2 -where rk <= 100 -order by i_category - ,i_class - ,i_brand - ,i_product_name - ,d_year - ,d_qoy - ,d_moy - ,s_store_id - ,sumsales - ,rk -limit 100 -PREHOOK: type: QUERY -PREHOOK: Input: default@date_dim -PREHOOK: Input: default@item -PREHOOK: Input: default@store -PREHOOK: Input: default@store_sales -PREHOOK: Output: hdfs://### HDFS PATH ### -POSTHOOK: query: explain cbo -select * -from (select i_category - ,i_class - ,i_brand - 
,i_product_name - ,d_year - ,d_qoy - ,d_moy - ,s_store_id - ,sumsales - ,rank() over (partition by i_category order by sumsales desc) rk - from (select i_category - ,i_class - ,i_brand - ,i_product_name - ,d_year - ,d_qoy - ,d_moy - ,s_store_id - ,sum(coalesce(ss_sales_price*ss_quantity,0)) sumsales - from store_sales - ,date_dim - ,store - ,item - where ss_sold_date_sk=d_date_sk - and ss_item_sk=i_item_sk - and ss_store_sk = s_store_sk - and d_month_seq between 1212 and 1212+11 - group by rollup(i_category, i_class, i_brand, i_product_name, d_year, d_qoy, d_moy,s_store_id))dw1) dw2 -where rk <= 100 -order by i_category - ,i_class - ,i_brand - ,i_product_name - ,d_year - ,d_qoy - ,d_moy - ,s_store_id - ,sumsales - ,rk -limit 100 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@date_dim -POSTHOOK: Input: default@item -POSTHOOK: Input: default@store -POSTHOOK: Input: default@store_sales -POSTHOOK: Output: hdfs://### HDFS PATH ### -CBO PLAN: -HiveSortLimit(sort0=[$0], sort1=[$1], sort2=[$2], sort3=[$3], sort4=[$4], sort5=[$5], sort6=[$6], sort7=[$7], sort8=[$8], sort9=[$9], dir0=[ASC], dir1=[ASC], dir2=[ASC], dir3=[ASC], dir4=[ASC], dir5=[ASC], dir6=[ASC], dir7=[ASC], dir8=[ASC], dir9=[ASC], fetch=[100]) - HiveProject(i_category=[$0], i_class=[$1], i_brand=[$2], i_product_name=[$3], d_year=[$4], d_qoy=[$5], d_moy=[$6], s_store_id=[$7], sumsales=[$8], rank_window_0=[$9]) - HiveFilter(condition=[<=($9, 100)]) - HiveProject(i_category=[$0], i_class=[$1], i_brand=[$2], i_product_name=[$3], d_year=[$4], d_qoy=[$5], d_moy=[$6], s_store_id=[$7], sumsales=[$8], rank_window_0=[rank() OVER (PARTITION BY $0 ORDER BY $8 DESC NULLS LAST ROWS BETWEEN 2147483647 FOLLOWING AND 2147483647 PRECEDING)]) - HiveProject($f0=[$0], $f1=[$1], $f2=[$2], $f3=[$3], $f4=[$4], $f5=[$5], $f6=[$6], $f7=[$7], $f8=[$8]) - HiveAggregate(group=[{0, 1, 2, 3, 4, 5, 6, 7}], groups=[[{0, 1, 2, 3, 4, 5, 6, 7}, {0, 1, 2, 3, 4, 5, 6}, {0, 1, 2, 3, 4, 5}, {0, 1, 2, 3, 4}, {0, 1, 2, 3}, {0, 1, 2}, {0, 1}, {0}, {}]], agg#0=[sum($8)]) - HiveProject($f0=[$3], $f1=[$2], $f2=[$1], $f3=[$4], $f4=[$12], $f5=[$14], $f6=[$13], $f7=[$16], $f8=[CASE(AND(IS NOT NULL($9), IS NOT NULL($8)), *($9, CAST($8):DECIMAL(10, 0)), 0)]) - HiveJoin(condition=[=($6, $0)], joinType=[inner], algorithm=[none], cost=[not available]) - HiveProject(i_item_sk=[$0], i_brand=[$8], i_class=[$10], i_category=[$12], i_product_name=[$21]) - HiveFilter(condition=[IS NOT NULL($0)]) - HiveTableScan(table=[[default, item]], table:alias=[item]) - HiveJoin(condition=[=($2, $10)], joinType=[inner], algorithm=[none], cost=[not available]) - HiveJoin(condition=[=($0, $5)], joinType=[inner], algorithm=[none], cost=[not available]) - HiveProject(ss_sold_date_sk=[$0], ss_item_sk=[$2], ss_store_sk=[$7], ss_quantity=[$10], ss_sales_price=[$13]) - HiveFilter(condition=[AND(IS NOT NULL($0), IS NOT NULL($7), IS NOT NULL($2))]) - HiveTableScan(table=[[default, store_sales]], table:alias=[store_sales]) - HiveProject(d_date_sk=[$0], d_month_seq=[$3], d_year=[$6], d_moy=[$8], d_qoy=[$10]) - HiveFilter(condition=[AND(BETWEEN(false, $3, 1212, 1223), IS NOT NULL($0))]) - HiveTableScan(table=[[default, date_dim]], table:alias=[date_dim]) - HiveProject(s_store_sk=[$0], s_store_id=[$1]) - HiveFilter(condition=[IS NOT NULL($0)]) - HiveTableScan(table=[[default, store]], table:alias=[store]) - diff --git ql/src/test/results/clientpositive/perf/tez/cbo_query70.q.out ql/src/test/results/clientpositive/perf/tez/cbo_query70.q.out deleted file mode 100644 index aa04df83fa..0000000000 --- 
ql/src/test/results/clientpositive/perf/tez/cbo_query70.q.out +++ /dev/null @@ -1,119 +0,0 @@ -PREHOOK: query: explain cbo -select - sum(ss_net_profit) as total_sum - ,s_state - ,s_county - ,grouping(s_state)+grouping(s_county) as lochierarchy - ,rank() over ( - partition by grouping(s_state)+grouping(s_county), - case when grouping(s_county) = 0 then s_state end - order by sum(ss_net_profit) desc) as rank_within_parent - from - store_sales - ,date_dim d1 - ,store - where - d1.d_month_seq between 1212 and 1212+11 - and d1.d_date_sk = ss_sold_date_sk - and s_store_sk = ss_store_sk - and s_state in - ( select s_state - from (select s_state as s_state, - rank() over ( partition by s_state order by sum(ss_net_profit) desc) as ranking - from store_sales, store, date_dim - where d_month_seq between 1212 and 1212+11 - and d_date_sk = ss_sold_date_sk - and s_store_sk = ss_store_sk - group by s_state - ) tmp1 - where ranking <= 5 - ) - group by rollup(s_state,s_county) - order by - lochierarchy desc - ,case when lochierarchy = 0 then s_state end - ,rank_within_parent - limit 100 -PREHOOK: type: QUERY -PREHOOK: Input: default@date_dim -PREHOOK: Input: default@store -PREHOOK: Input: default@store_sales -PREHOOK: Output: hdfs://### HDFS PATH ### -POSTHOOK: query: explain cbo -select - sum(ss_net_profit) as total_sum - ,s_state - ,s_county - ,grouping(s_state)+grouping(s_county) as lochierarchy - ,rank() over ( - partition by grouping(s_state)+grouping(s_county), - case when grouping(s_county) = 0 then s_state end - order by sum(ss_net_profit) desc) as rank_within_parent - from - store_sales - ,date_dim d1 - ,store - where - d1.d_month_seq between 1212 and 1212+11 - and d1.d_date_sk = ss_sold_date_sk - and s_store_sk = ss_store_sk - and s_state in - ( select s_state - from (select s_state as s_state, - rank() over ( partition by s_state order by sum(ss_net_profit) desc) as ranking - from store_sales, store, date_dim - where d_month_seq between 1212 and 1212+11 - and d_date_sk = ss_sold_date_sk - and s_store_sk = ss_store_sk - group by s_state - ) tmp1 - where ranking <= 5 - ) - group by rollup(s_state,s_county) - order by - lochierarchy desc - ,case when lochierarchy = 0 then s_state end - ,rank_within_parent - limit 100 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@date_dim -POSTHOOK: Input: default@store -POSTHOOK: Input: default@store_sales -POSTHOOK: Output: hdfs://### HDFS PATH ### -CBO PLAN: -HiveProject(total_sum=[$0], s_state=[$1], s_county=[$2], lochierarchy=[$3], rank_within_parent=[$4]) - HiveSortLimit(sort0=[$3], sort1=[$5], sort2=[$4], dir0=[DESC-nulls-last], dir1=[ASC], dir2=[ASC], fetch=[100]) - HiveProject(total_sum=[$2], s_state=[$0], s_county=[$1], lochierarchy=[+(grouping($3, 1), grouping($3, 0))], rank_within_parent=[rank() OVER (PARTITION BY +(grouping($3, 1), grouping($3, 0)), CASE(=(grouping($3, 0), 0), $0, null) ORDER BY $2 DESC NULLS LAST ROWS BETWEEN 2147483647 FOLLOWING AND 2147483647 PRECEDING)], (tok_function when (= (tok_table_or_col lochierarchy) 0) (tok_table_or_col s_state))=[CASE(=(+(grouping($3, 1), grouping($3, 0)), 0), $0, null)]) - HiveProject($f0=[$0], $f1=[$1], $f2=[$2], GROUPING__ID=[$3]) - HiveAggregate(group=[{0, 1}], groups=[[{0, 1}, {0}, {}]], agg#0=[sum($2)], GROUPING__ID=[GROUPING__ID()]) - HiveProject($f0=[$7], $f1=[$6], $f2=[$2]) - HiveJoin(condition=[=($7, $8)], joinType=[inner], algorithm=[none], cost=[not available]) - HiveJoin(condition=[=($5, $1)], joinType=[inner], algorithm=[none], cost=[not available]) - HiveJoin(condition=[=($3, $0)], 
joinType=[inner], algorithm=[none], cost=[not available]) - HiveProject(ss_sold_date_sk=[$0], ss_store_sk=[$7], ss_net_profit=[$22]) - HiveFilter(condition=[AND(IS NOT NULL($0), IS NOT NULL($7))]) - HiveTableScan(table=[[default, store_sales]], table:alias=[store_sales]) - HiveProject(d_date_sk=[$0], d_month_seq=[$3]) - HiveFilter(condition=[AND(BETWEEN(false, $3, 1212, 1223), IS NOT NULL($0))]) - HiveTableScan(table=[[default, date_dim]], table:alias=[d1]) - HiveProject(s_store_sk=[$0], s_county=[$23], s_state=[$24]) - HiveFilter(condition=[AND(IS NOT NULL($24), IS NOT NULL($0))]) - HiveTableScan(table=[[default, store]], table:alias=[store]) - HiveProject(s_state=[$0]) - HiveFilter(condition=[<=($1, 5)]) - HiveProject((tok_table_or_col s_state)=[$0], rank_window_0=[$1]) - HiveProject((tok_table_or_col s_state)=[$0], rank_window_0=[rank() OVER (PARTITION BY $0 ORDER BY $1 DESC NULLS LAST ROWS BETWEEN 2147483647 FOLLOWING AND 2147483647 PRECEDING)], window_col_0=[$1]) - HiveProject(s_state=[$0], $f1=[$1]) - HiveAggregate(group=[{6}], agg#0=[sum($2)]) - HiveJoin(condition=[=($5, $1)], joinType=[inner], algorithm=[none], cost=[not available]) - HiveJoin(condition=[=($3, $0)], joinType=[inner], algorithm=[none], cost=[not available]) - HiveProject(ss_sold_date_sk=[$0], ss_store_sk=[$7], ss_net_profit=[$22]) - HiveFilter(condition=[AND(IS NOT NULL($7), IS NOT NULL($0))]) - HiveTableScan(table=[[default, store_sales]], table:alias=[store_sales]) - HiveProject(d_date_sk=[$0], d_month_seq=[$3]) - HiveFilter(condition=[AND(BETWEEN(false, $3, 1212, 1223), IS NOT NULL($0))]) - HiveTableScan(table=[[default, date_dim]], table:alias=[date_dim]) - HiveProject(s_store_sk=[$0], s_state=[$24]) - HiveFilter(condition=[AND(IS NOT NULL($0), IS NOT NULL($24))]) - HiveTableScan(table=[[default, store]], table:alias=[store]) - diff --git ql/src/test/results/clientpositive/perf/tez/cbo_query86.q.out ql/src/test/results/clientpositive/perf/tez/cbo_query86.q.out deleted file mode 100644 index 0832a6360a..0000000000 --- ql/src/test/results/clientpositive/perf/tez/cbo_query86.q.out +++ /dev/null @@ -1,77 +0,0 @@ -PREHOOK: query: explain cbo -select - sum(ws_net_paid) as total_sum - ,i_category - ,i_class - ,grouping(i_category)+grouping(i_class) as lochierarchy - ,rank() over ( - partition by grouping(i_category)+grouping(i_class), - case when grouping(i_class) = 0 then i_category end - order by sum(ws_net_paid) desc) as rank_within_parent - from - web_sales - ,date_dim d1 - ,item - where - d1.d_month_seq between 1212 and 1212+11 - and d1.d_date_sk = ws_sold_date_sk - and i_item_sk = ws_item_sk - group by rollup(i_category,i_class) - order by - lochierarchy desc, - case when lochierarchy = 0 then i_category end, - rank_within_parent - limit 100 -PREHOOK: type: QUERY -PREHOOK: Input: default@date_dim -PREHOOK: Input: default@item -PREHOOK: Input: default@web_sales -PREHOOK: Output: hdfs://### HDFS PATH ### -POSTHOOK: query: explain cbo -select - sum(ws_net_paid) as total_sum - ,i_category - ,i_class - ,grouping(i_category)+grouping(i_class) as lochierarchy - ,rank() over ( - partition by grouping(i_category)+grouping(i_class), - case when grouping(i_class) = 0 then i_category end - order by sum(ws_net_paid) desc) as rank_within_parent - from - web_sales - ,date_dim d1 - ,item - where - d1.d_month_seq between 1212 and 1212+11 - and d1.d_date_sk = ws_sold_date_sk - and i_item_sk = ws_item_sk - group by rollup(i_category,i_class) - order by - lochierarchy desc, - case when lochierarchy = 0 then i_category end, - 
rank_within_parent - limit 100 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@date_dim -POSTHOOK: Input: default@item -POSTHOOK: Input: default@web_sales -POSTHOOK: Output: hdfs://### HDFS PATH ### -CBO PLAN: -HiveProject(total_sum=[$0], i_category=[$1], i_class=[$2], lochierarchy=[$3], rank_within_parent=[$4]) - HiveSortLimit(sort0=[$3], sort1=[$5], sort2=[$4], dir0=[DESC-nulls-last], dir1=[ASC], dir2=[ASC], fetch=[100]) - HiveProject(total_sum=[$2], i_category=[$0], i_class=[$1], lochierarchy=[+(grouping($3, 1), grouping($3, 0))], rank_within_parent=[rank() OVER (PARTITION BY +(grouping($3, 1), grouping($3, 0)), CASE(=(grouping($3, 0), 0), $0, null) ORDER BY $2 DESC NULLS LAST ROWS BETWEEN 2147483647 FOLLOWING AND 2147483647 PRECEDING)], (tok_function when (= (tok_table_or_col lochierarchy) 0) (tok_table_or_col i_category))=[CASE(=(+(grouping($3, 1), grouping($3, 0)), 0), $0, null)]) - HiveProject($f0=[$0], $f1=[$1], $f2=[$2], GROUPING__ID=[$3]) - HiveAggregate(group=[{0, 1}], groups=[[{0, 1}, {0}, {}]], agg#0=[sum($2)], GROUPING__ID=[GROUPING__ID()]) - HiveProject($f0=[$2], $f1=[$1], $f2=[$5]) - HiveJoin(condition=[=($0, $4)], joinType=[inner], algorithm=[none], cost=[not available]) - HiveProject(i_item_sk=[$0], i_class=[$10], i_category=[$12]) - HiveFilter(condition=[IS NOT NULL($0)]) - HiveTableScan(table=[[default, item]], table:alias=[item]) - HiveJoin(condition=[=($3, $0)], joinType=[inner], algorithm=[none], cost=[not available]) - HiveProject(ws_sold_date_sk=[$0], ws_item_sk=[$3], ws_net_paid=[$29]) - HiveFilter(condition=[AND(IS NOT NULL($0), IS NOT NULL($3))]) - HiveTableScan(table=[[default, web_sales]], table:alias=[web_sales]) - HiveProject(d_date_sk=[$0], d_month_seq=[$3]) - HiveFilter(condition=[AND(BETWEEN(false, $3, 1212, 1223), IS NOT NULL($0))]) - HiveTableScan(table=[[default, date_dim]], table:alias=[d1]) - diff --git ql/src/test/results/clientpositive/perf/tez/constraints/cbo_query44.q.out ql/src/test/results/clientpositive/perf/tez/constraints/cbo_query44.q.out deleted file mode 100644 index 8cc89f6df2..0000000000 --- ql/src/test/results/clientpositive/perf/tez/constraints/cbo_query44.q.out +++ /dev/null @@ -1,113 +0,0 @@ -Warning: Shuffle Join MERGEJOIN[101][tables = [$hdt$_1, $hdt$_2]] in Stage 'Reducer 8' is a cross product -PREHOOK: query: explain cbo -select asceding.rnk, i1.i_product_name best_performing, i2.i_product_name worst_performing -from(select * - from (select item_sk,rank() over (order by rank_col asc) rnk - from (select ss_item_sk item_sk,avg(ss_net_profit) rank_col - from store_sales ss1 - where ss_store_sk = 410 - group by ss_item_sk - having avg(ss_net_profit) > 0.9*(select avg(ss_net_profit) rank_col - from store_sales - where ss_store_sk = 410 - and ss_hdemo_sk is null - group by ss_store_sk))V1)V11 - where rnk < 11) asceding, - (select * - from (select item_sk,rank() over (order by rank_col desc) rnk - from (select ss_item_sk item_sk,avg(ss_net_profit) rank_col - from store_sales ss1 - where ss_store_sk = 410 - group by ss_item_sk - having avg(ss_net_profit) > 0.9*(select avg(ss_net_profit) rank_col - from store_sales - where ss_store_sk = 410 - and ss_hdemo_sk is null - group by ss_store_sk))V2)V21 - where rnk < 11) descending, -item i1, -item i2 -where asceding.rnk = descending.rnk - and i1.i_item_sk=asceding.item_sk - and i2.i_item_sk=descending.item_sk -order by asceding.rnk -limit 100 -PREHOOK: type: QUERY -PREHOOK: Input: default@item -PREHOOK: Input: default@store_sales -PREHOOK: Output: hdfs://### HDFS PATH ### -POSTHOOK: query: 
explain cbo -select asceding.rnk, i1.i_product_name best_performing, i2.i_product_name worst_performing -from(select * - from (select item_sk,rank() over (order by rank_col asc) rnk - from (select ss_item_sk item_sk,avg(ss_net_profit) rank_col - from store_sales ss1 - where ss_store_sk = 410 - group by ss_item_sk - having avg(ss_net_profit) > 0.9*(select avg(ss_net_profit) rank_col - from store_sales - where ss_store_sk = 410 - and ss_hdemo_sk is null - group by ss_store_sk))V1)V11 - where rnk < 11) asceding, - (select * - from (select item_sk,rank() over (order by rank_col desc) rnk - from (select ss_item_sk item_sk,avg(ss_net_profit) rank_col - from store_sales ss1 - where ss_store_sk = 410 - group by ss_item_sk - having avg(ss_net_profit) > 0.9*(select avg(ss_net_profit) rank_col - from store_sales - where ss_store_sk = 410 - and ss_hdemo_sk is null - group by ss_store_sk))V2)V21 - where rnk < 11) descending, -item i1, -item i2 -where asceding.rnk = descending.rnk - and i1.i_item_sk=asceding.item_sk - and i2.i_item_sk=descending.item_sk -order by asceding.rnk -limit 100 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@item -POSTHOOK: Input: default@store_sales -POSTHOOK: Output: hdfs://### HDFS PATH ### -CBO PLAN: -HiveSortLimit(sort0=[$0], dir0=[ASC], fetch=[100]) - HiveProject(rnk=[$3], best_performing=[$1], worst_performing=[$5]) - HiveJoin(condition=[=($3, $7)], joinType=[inner], algorithm=[none], cost=[not available]) - HiveJoin(condition=[=($0, $2)], joinType=[inner], algorithm=[none], cost=[not available]) - HiveProject(i_item_sk=[$0], i_product_name=[$21]) - HiveTableScan(table=[[default, item]], table:alias=[i1]) - HiveProject(item_sk=[$0], rank_window_0=[$1]) - HiveFilter(condition=[<($1, 11)]) - HiveProject(item_sk=[$0], rank_window_0=[rank() OVER (PARTITION BY 0 ORDER BY $1 NULLS FIRST ROWS BETWEEN 2147483647 FOLLOWING AND 2147483647 PRECEDING)]) - HiveJoin(condition=[>($1, $2)], joinType=[inner], algorithm=[none], cost=[not available]) - HiveProject($f0=[$0], $f1=[/($1, $2)]) - HiveAggregate(group=[{2}], agg#0=[sum($22)], agg#1=[count($22)]) - HiveFilter(condition=[=($7, 410)]) - HiveTableScan(table=[[default, store_sales]], table:alias=[ss1]) - HiveProject(*=[*(0.9, /($1, $2))]) - HiveAggregate(group=[{0}], agg#0=[sum($1)], agg#1=[count($1)]) - HiveProject($f0=[true], $f1=[$22]) - HiveFilter(condition=[AND(=($7, 410), IS NULL($5))]) - HiveTableScan(table=[[default, store_sales]], table:alias=[store_sales]) - HiveProject(i_item_sk=[$0], i_product_name=[$1], item_sk=[$2], rank_window_0=[$3]) - HiveJoin(condition=[=($0, $2)], joinType=[inner], algorithm=[none], cost=[not available]) - HiveProject(i_item_sk=[$0], i_product_name=[$21]) - HiveTableScan(table=[[default, item]], table:alias=[i2]) - HiveProject(item_sk=[$0], rank_window_0=[$1]) - HiveFilter(condition=[<($1, 11)]) - HiveProject(item_sk=[$0], rank_window_0=[rank() OVER (PARTITION BY 0 ORDER BY $1 DESC NULLS LAST ROWS BETWEEN 2147483647 FOLLOWING AND 2147483647 PRECEDING)]) - HiveJoin(condition=[>($1, $2)], joinType=[inner], algorithm=[none], cost=[not available]) - HiveProject($f0=[$0], $f1=[/($1, $2)]) - HiveAggregate(group=[{2}], agg#0=[sum($22)], agg#1=[count($22)]) - HiveFilter(condition=[=($7, 410)]) - HiveTableScan(table=[[default, store_sales]], table:alias=[ss1]) - HiveProject(*=[*(0.9, /($1, $2))]) - HiveAggregate(group=[{0}], agg#0=[sum($1)], agg#1=[count($1)]) - HiveProject($f0=[true], $f1=[$22]) - HiveFilter(condition=[AND(=($7, 410), IS NULL($5))]) - HiveTableScan(table=[[default, store_sales]], 
table:alias=[store_sales]) - diff --git ql/src/test/results/clientpositive/perf/tez/constraints/cbo_query45.q.out ql/src/test/results/clientpositive/perf/tez/constraints/cbo_query45.q.out deleted file mode 100644 index 85f8116c2f..0000000000 --- ql/src/test/results/clientpositive/perf/tez/constraints/cbo_query45.q.out +++ /dev/null @@ -1,81 +0,0 @@ -PREHOOK: query: explain cbo -select ca_zip, ca_county, sum(ws_sales_price) - from web_sales, customer, customer_address, date_dim, item - where ws_bill_customer_sk = c_customer_sk - and c_current_addr_sk = ca_address_sk - and ws_item_sk = i_item_sk - and ( substr(ca_zip,1,5) in ('85669', '86197','88274','83405','86475', '85392', '85460', '80348', '81792') - or - i_item_id in (select i_item_id - from item - where i_item_sk in (2, 3, 5, 7, 11, 13, 17, 19, 23, 29) - ) - ) - and ws_sold_date_sk = d_date_sk - and d_qoy = 2 and d_year = 2000 - group by ca_zip, ca_county - order by ca_zip, ca_county - limit 100 -PREHOOK: type: QUERY -PREHOOK: Input: default@customer -PREHOOK: Input: default@customer_address -PREHOOK: Input: default@date_dim -PREHOOK: Input: default@item -PREHOOK: Input: default@web_sales -PREHOOK: Output: hdfs://### HDFS PATH ### -POSTHOOK: query: explain cbo -select ca_zip, ca_county, sum(ws_sales_price) - from web_sales, customer, customer_address, date_dim, item - where ws_bill_customer_sk = c_customer_sk - and c_current_addr_sk = ca_address_sk - and ws_item_sk = i_item_sk - and ( substr(ca_zip,1,5) in ('85669', '86197','88274','83405','86475', '85392', '85460', '80348', '81792') - or - i_item_id in (select i_item_id - from item - where i_item_sk in (2, 3, 5, 7, 11, 13, 17, 19, 23, 29) - ) - ) - and ws_sold_date_sk = d_date_sk - and d_qoy = 2 and d_year = 2000 - group by ca_zip, ca_county - order by ca_zip, ca_county - limit 100 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@customer -POSTHOOK: Input: default@customer_address -POSTHOOK: Input: default@date_dim -POSTHOOK: Input: default@item -POSTHOOK: Input: default@web_sales -POSTHOOK: Output: hdfs://### HDFS PATH ### -CBO PLAN: -HiveSortLimit(sort0=[$0], sort1=[$1], dir0=[ASC], dir1=[ASC], fetch=[100]) - HiveProject(ca_zip=[$1], ca_county=[$0], $f2=[$2]) - HiveAggregate(group=[{7, 8}], agg#0=[sum($3)]) - HiveFilter(condition=[OR(IN(substr($8, 1, 5), _UTF-16LE'85669', _UTF-16LE'86197', _UTF-16LE'88274', _UTF-16LE'83405', _UTF-16LE'86475', _UTF-16LE'85392', _UTF-16LE'85460', _UTF-16LE'80348', _UTF-16LE'81792'), IS NOT NULL($15))]) - HiveProject(ws_sold_date_sk=[$9], ws_item_sk=[$10], ws_bill_customer_sk=[$11], ws_sales_price=[$12], c_customer_sk=[$0], c_current_addr_sk=[$1], ca_address_sk=[$2], ca_county=[$3], ca_zip=[$4], d_date_sk=[$13], d_year=[$14], d_qoy=[$15], i_item_sk=[$5], i_item_id=[$6], i_item_id0=[$7], i1160=[$8]) - HiveJoin(condition=[=($11, $0)], joinType=[inner], algorithm=[none], cost=[not available]) - HiveJoin(condition=[=($1, $2)], joinType=[inner], algorithm=[none], cost=[not available]) - HiveProject(c_customer_sk=[$0], c_current_addr_sk=[$4]) - HiveFilter(condition=[IS NOT NULL($4)]) - HiveTableScan(table=[[default, customer]], table:alias=[customer]) - HiveProject(ca_address_sk=[$0], ca_county=[$7], ca_zip=[$9]) - HiveTableScan(table=[[default, customer_address]], table:alias=[customer_address]) - HiveProject(i_item_sk=[$0], i_item_id=[$1], i_item_id0=[$2], i1160=[$3], ws_sold_date_sk=[$4], ws_item_sk=[$5], ws_bill_customer_sk=[$6], ws_sales_price=[$7], d_date_sk=[$8], d_year=[$9], d_qoy=[$10]) - HiveJoin(condition=[=($5, $0)], joinType=[inner], 
algorithm=[none], cost=[not available]) - HiveJoin(condition=[=($1, $2)], joinType=[left], algorithm=[none], cost=[not available]) - HiveProject(i_item_sk=[$0], i_item_id=[$1]) - HiveTableScan(table=[[default, item]], table:alias=[item]) - HiveProject(i_item_id=[$0], i1160=[true]) - HiveAggregate(group=[{1}]) - HiveFilter(condition=[IN($0, 2, 3, 5, 7, 11, 13, 17, 19, 23, 29)]) - HiveTableScan(table=[[default, item]], table:alias=[item]) - HiveProject(ws_sold_date_sk=[$0], ws_item_sk=[$1], ws_bill_customer_sk=[$2], ws_sales_price=[$3], d_date_sk=[$4], d_year=[$5], d_qoy=[$6]) - HiveJoin(condition=[=($0, $4)], joinType=[inner], algorithm=[none], cost=[not available]) - HiveProject(ws_sold_date_sk=[$0], ws_item_sk=[$3], ws_bill_customer_sk=[$4], ws_sales_price=[$21]) - HiveFilter(condition=[AND(IS NOT NULL($4), IS NOT NULL($0))]) - HiveTableScan(table=[[default, web_sales]], table:alias=[web_sales]) - HiveProject(d_date_sk=[$0], d_year=[CAST(2000):INTEGER], d_qoy=[CAST(2):INTEGER]) - HiveFilter(condition=[AND(=($10, 2), =($6, 2000))]) - HiveTableScan(table=[[default, date_dim]], table:alias=[date_dim]) - diff --git ql/src/test/results/clientpositive/perf/tez/constraints/cbo_query67.q.out ql/src/test/results/clientpositive/perf/tez/constraints/cbo_query67.q.out deleted file mode 100644 index 41a3896053..0000000000 --- ql/src/test/results/clientpositive/perf/tez/constraints/cbo_query67.q.out +++ /dev/null @@ -1,118 +0,0 @@ -PREHOOK: query: explain cbo -select * -from (select i_category - ,i_class - ,i_brand - ,i_product_name - ,d_year - ,d_qoy - ,d_moy - ,s_store_id - ,sumsales - ,rank() over (partition by i_category order by sumsales desc) rk - from (select i_category - ,i_class - ,i_brand - ,i_product_name - ,d_year - ,d_qoy - ,d_moy - ,s_store_id - ,sum(coalesce(ss_sales_price*ss_quantity,0)) sumsales - from store_sales - ,date_dim - ,store - ,item - where ss_sold_date_sk=d_date_sk - and ss_item_sk=i_item_sk - and ss_store_sk = s_store_sk - and d_month_seq between 1212 and 1212+11 - group by rollup(i_category, i_class, i_brand, i_product_name, d_year, d_qoy, d_moy,s_store_id))dw1) dw2 -where rk <= 100 -order by i_category - ,i_class - ,i_brand - ,i_product_name - ,d_year - ,d_qoy - ,d_moy - ,s_store_id - ,sumsales - ,rk -limit 100 -PREHOOK: type: QUERY -PREHOOK: Input: default@date_dim -PREHOOK: Input: default@item -PREHOOK: Input: default@store -PREHOOK: Input: default@store_sales -PREHOOK: Output: hdfs://### HDFS PATH ### -POSTHOOK: query: explain cbo -select * -from (select i_category - ,i_class - ,i_brand - ,i_product_name - ,d_year - ,d_qoy - ,d_moy - ,s_store_id - ,sumsales - ,rank() over (partition by i_category order by sumsales desc) rk - from (select i_category - ,i_class - ,i_brand - ,i_product_name - ,d_year - ,d_qoy - ,d_moy - ,s_store_id - ,sum(coalesce(ss_sales_price*ss_quantity,0)) sumsales - from store_sales - ,date_dim - ,store - ,item - where ss_sold_date_sk=d_date_sk - and ss_item_sk=i_item_sk - and ss_store_sk = s_store_sk - and d_month_seq between 1212 and 1212+11 - group by rollup(i_category, i_class, i_brand, i_product_name, d_year, d_qoy, d_moy,s_store_id))dw1) dw2 -where rk <= 100 -order by i_category - ,i_class - ,i_brand - ,i_product_name - ,d_year - ,d_qoy - ,d_moy - ,s_store_id - ,sumsales - ,rk -limit 100 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@date_dim -POSTHOOK: Input: default@item -POSTHOOK: Input: default@store -POSTHOOK: Input: default@store_sales -POSTHOOK: Output: hdfs://### HDFS PATH ### -CBO PLAN: -HiveSortLimit(sort0=[$0], sort1=[$1], 
sort2=[$2], sort3=[$3], sort4=[$4], sort5=[$5], sort6=[$6], sort7=[$7], sort8=[$8], sort9=[$9], dir0=[ASC], dir1=[ASC], dir2=[ASC], dir3=[ASC], dir4=[ASC], dir5=[ASC], dir6=[ASC], dir7=[ASC], dir8=[ASC], dir9=[ASC], fetch=[100]) - HiveProject(i_category=[$0], i_class=[$1], i_brand=[$2], i_product_name=[$3], d_year=[$4], d_qoy=[$5], d_moy=[$6], s_store_id=[$7], sumsales=[$8], rank_window_0=[$9]) - HiveFilter(condition=[<=($9, 100)]) - HiveProject(i_category=[$2], i_class=[$1], i_brand=[$0], i_product_name=[$3], d_year=[$4], d_qoy=[$6], d_moy=[$5], s_store_id=[$7], sumsales=[$8], rank_window_0=[rank() OVER (PARTITION BY $2 ORDER BY $8 DESC NULLS LAST ROWS BETWEEN 2147483647 FOLLOWING AND 2147483647 PRECEDING)]) - HiveProject(i_brand=[$0], i_class=[$1], i_category=[$2], i_product_name=[$3], d_year=[$4], d_moy=[$5], d_qoy=[$6], s_store_id=[$7], $f8=[$8]) - HiveAggregate(group=[{1, 2, 3, 4, 6, 7, 8, 9}], groups=[[{1, 2, 3, 4, 6, 7, 8, 9}, {1, 2, 3, 4, 6, 7, 8}, {1, 2, 3, 4, 6, 8}, {1, 2, 3, 4, 6}, {1, 2, 3, 4}, {1, 2, 3}, {2, 3}, {3}, {}]], agg#0=[sum($10)]) - HiveJoin(condition=[=($5, $0)], joinType=[inner], algorithm=[none], cost=[not available]) - HiveProject(i_item_sk=[$0], i_brand=[$8], i_class=[$10], i_category=[$12], i_product_name=[$21]) - HiveTableScan(table=[[default, item]], table:alias=[item]) - HiveProject(ss_item_sk=[$1], d_year=[$5], d_moy=[$6], d_qoy=[$7], s_store_id=[$9], CASE=[$3]) - HiveJoin(condition=[=($2, $8)], joinType=[inner], algorithm=[none], cost=[not available]) - HiveJoin(condition=[=($0, $4)], joinType=[inner], algorithm=[none], cost=[not available]) - HiveProject(ss_sold_date_sk=[$0], ss_item_sk=[$2], ss_store_sk=[$7], CASE=[CASE(AND(IS NOT NULL($13), IS NOT NULL($10)), *($13, CAST($10):DECIMAL(10, 0)), 0)]) - HiveFilter(condition=[AND(IS NOT NULL($0), IS NOT NULL($7))]) - HiveTableScan(table=[[default, store_sales]], table:alias=[store_sales]) - HiveProject(d_date_sk=[$0], d_year=[$6], d_moy=[$8], d_qoy=[$10]) - HiveFilter(condition=[BETWEEN(false, $3, 1212, 1223)]) - HiveTableScan(table=[[default, date_dim]], table:alias=[date_dim]) - HiveProject(s_store_sk=[$0], s_store_id=[$1]) - HiveTableScan(table=[[default, store]], table:alias=[store]) - diff --git ql/src/test/results/clientpositive/perf/tez/constraints/cbo_query70.q.out ql/src/test/results/clientpositive/perf/tez/constraints/cbo_query70.q.out deleted file mode 100644 index 7103b15c15..0000000000 --- ql/src/test/results/clientpositive/perf/tez/constraints/cbo_query70.q.out +++ /dev/null @@ -1,119 +0,0 @@ -PREHOOK: query: explain cbo -select - sum(ss_net_profit) as total_sum - ,s_state - ,s_county - ,grouping(s_state)+grouping(s_county) as lochierarchy - ,rank() over ( - partition by grouping(s_state)+grouping(s_county), - case when grouping(s_county) = 0 then s_state end - order by sum(ss_net_profit) desc) as rank_within_parent - from - store_sales - ,date_dim d1 - ,store - where - d1.d_month_seq between 1212 and 1212+11 - and d1.d_date_sk = ss_sold_date_sk - and s_store_sk = ss_store_sk - and s_state in - ( select s_state - from (select s_state as s_state, - rank() over ( partition by s_state order by sum(ss_net_profit) desc) as ranking - from store_sales, store, date_dim - where d_month_seq between 1212 and 1212+11 - and d_date_sk = ss_sold_date_sk - and s_store_sk = ss_store_sk - group by s_state - ) tmp1 - where ranking <= 5 - ) - group by rollup(s_state,s_county) - order by - lochierarchy desc - ,case when lochierarchy = 0 then s_state end - ,rank_within_parent - limit 100 -PREHOOK: type: QUERY 
-PREHOOK: Input: default@date_dim -PREHOOK: Input: default@store -PREHOOK: Input: default@store_sales -PREHOOK: Output: hdfs://### HDFS PATH ### -POSTHOOK: query: explain cbo -select - sum(ss_net_profit) as total_sum - ,s_state - ,s_county - ,grouping(s_state)+grouping(s_county) as lochierarchy - ,rank() over ( - partition by grouping(s_state)+grouping(s_county), - case when grouping(s_county) = 0 then s_state end - order by sum(ss_net_profit) desc) as rank_within_parent - from - store_sales - ,date_dim d1 - ,store - where - d1.d_month_seq between 1212 and 1212+11 - and d1.d_date_sk = ss_sold_date_sk - and s_store_sk = ss_store_sk - and s_state in - ( select s_state - from (select s_state as s_state, - rank() over ( partition by s_state order by sum(ss_net_profit) desc) as ranking - from store_sales, store, date_dim - where d_month_seq between 1212 and 1212+11 - and d_date_sk = ss_sold_date_sk - and s_store_sk = ss_store_sk - group by s_state - ) tmp1 - where ranking <= 5 - ) - group by rollup(s_state,s_county) - order by - lochierarchy desc - ,case when lochierarchy = 0 then s_state end - ,rank_within_parent - limit 100 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@date_dim -POSTHOOK: Input: default@store -POSTHOOK: Input: default@store_sales -POSTHOOK: Output: hdfs://### HDFS PATH ### -CBO PLAN: -HiveProject(total_sum=[$0], s_state=[$1], s_county=[$2], lochierarchy=[$3], rank_within_parent=[$4]) - HiveSortLimit(sort0=[$3], sort1=[$5], sort2=[$4], dir0=[DESC-nulls-last], dir1=[ASC], dir2=[ASC], fetch=[100]) - HiveProject(total_sum=[$2], s_state=[$0], s_county=[$1], lochierarchy=[+(grouping($3, 1), grouping($3, 0))], rank_within_parent=[rank() OVER (PARTITION BY +(grouping($3, 1), grouping($3, 0)), CASE(=(grouping($3, 0), 0), $0, null) ORDER BY $2 DESC NULLS LAST ROWS BETWEEN 2147483647 FOLLOWING AND 2147483647 PRECEDING)], (tok_function when (= (tok_table_or_col lochierarchy) 0) (tok_table_or_col s_state))=[CASE(=(+(grouping($3, 1), grouping($3, 0)), 0), $0, null)]) - HiveProject($f0=[$0], $f1=[$1], $f2=[$2], GROUPING__ID=[$3]) - HiveAggregate(group=[{0, 1}], groups=[[{0, 1}, {0}, {}]], agg#0=[sum($2)], GROUPING__ID=[GROUPING__ID()]) - HiveProject($f0=[$7], $f1=[$6], $f2=[$2]) - HiveJoin(condition=[=($7, $8)], joinType=[inner], algorithm=[none], cost=[not available]) - HiveJoin(condition=[=($5, $1)], joinType=[inner], algorithm=[none], cost=[not available]) - HiveJoin(condition=[=($3, $0)], joinType=[inner], algorithm=[none], cost=[not available]) - HiveProject(ss_sold_date_sk=[$0], ss_store_sk=[$7], ss_net_profit=[$22]) - HiveFilter(condition=[AND(IS NOT NULL($0), IS NOT NULL($7))]) - HiveTableScan(table=[[default, store_sales]], table:alias=[store_sales]) - HiveProject(d_date_sk=[$0], d_month_seq=[$3]) - HiveFilter(condition=[BETWEEN(false, $3, 1212, 1223)]) - HiveTableScan(table=[[default, date_dim]], table:alias=[d1]) - HiveProject(s_store_sk=[$0], s_county=[$23], s_state=[$24]) - HiveFilter(condition=[IS NOT NULL($24)]) - HiveTableScan(table=[[default, store]], table:alias=[store]) - HiveProject(s_state=[$0]) - HiveFilter(condition=[<=($1, 5)]) - HiveProject((tok_table_or_col s_state)=[$0], rank_window_0=[$1]) - HiveProject((tok_table_or_col s_state)=[$0], rank_window_0=[rank() OVER (PARTITION BY $0 ORDER BY $1 DESC NULLS LAST ROWS BETWEEN 2147483647 FOLLOWING AND 2147483647 PRECEDING)], window_col_0=[$1]) - HiveProject(s_state=[$0], $f1=[$1]) - HiveAggregate(group=[{5}], agg#0=[sum($2)]) - HiveJoin(condition=[=($4, $1)], joinType=[inner], algorithm=[none], cost=[not available]) - 
HiveJoin(condition=[=($3, $0)], joinType=[inner], algorithm=[none], cost=[not available]) - HiveProject(ss_sold_date_sk=[$0], ss_store_sk=[$7], ss_net_profit=[$22]) - HiveFilter(condition=[AND(IS NOT NULL($7), IS NOT NULL($0))]) - HiveTableScan(table=[[default, store_sales]], table:alias=[store_sales]) - HiveProject(d_date_sk=[$0]) - HiveFilter(condition=[BETWEEN(false, $3, 1212, 1223)]) - HiveTableScan(table=[[default, date_dim]], table:alias=[date_dim]) - HiveProject(s_store_sk=[$0], s_state=[$24]) - HiveFilter(condition=[IS NOT NULL($24)]) - HiveTableScan(table=[[default, store]], table:alias=[store]) - diff --git ql/src/test/results/clientpositive/perf/tez/constraints/cbo_query86.q.out ql/src/test/results/clientpositive/perf/tez/constraints/cbo_query86.q.out deleted file mode 100644 index 8e89983463..0000000000 --- ql/src/test/results/clientpositive/perf/tez/constraints/cbo_query86.q.out +++ /dev/null @@ -1,76 +0,0 @@ -PREHOOK: query: explain cbo -select - sum(ws_net_paid) as total_sum - ,i_category - ,i_class - ,grouping(i_category)+grouping(i_class) as lochierarchy - ,rank() over ( - partition by grouping(i_category)+grouping(i_class), - case when grouping(i_class) = 0 then i_category end - order by sum(ws_net_paid) desc) as rank_within_parent - from - web_sales - ,date_dim d1 - ,item - where - d1.d_month_seq between 1212 and 1212+11 - and d1.d_date_sk = ws_sold_date_sk - and i_item_sk = ws_item_sk - group by rollup(i_category,i_class) - order by - lochierarchy desc, - case when lochierarchy = 0 then i_category end, - rank_within_parent - limit 100 -PREHOOK: type: QUERY -PREHOOK: Input: default@date_dim -PREHOOK: Input: default@item -PREHOOK: Input: default@web_sales -PREHOOK: Output: hdfs://### HDFS PATH ### -POSTHOOK: query: explain cbo -select - sum(ws_net_paid) as total_sum - ,i_category - ,i_class - ,grouping(i_category)+grouping(i_class) as lochierarchy - ,rank() over ( - partition by grouping(i_category)+grouping(i_class), - case when grouping(i_class) = 0 then i_category end - order by sum(ws_net_paid) desc) as rank_within_parent - from - web_sales - ,date_dim d1 - ,item - where - d1.d_month_seq between 1212 and 1212+11 - and d1.d_date_sk = ws_sold_date_sk - and i_item_sk = ws_item_sk - group by rollup(i_category,i_class) - order by - lochierarchy desc, - case when lochierarchy = 0 then i_category end, - rank_within_parent - limit 100 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@date_dim -POSTHOOK: Input: default@item -POSTHOOK: Input: default@web_sales -POSTHOOK: Output: hdfs://### HDFS PATH ### -CBO PLAN: -HiveProject(total_sum=[$0], i_category=[$1], i_class=[$2], lochierarchy=[$3], rank_within_parent=[$4]) - HiveSortLimit(sort0=[$3], sort1=[$5], sort2=[$4], dir0=[DESC-nulls-last], dir1=[ASC], dir2=[ASC], fetch=[100]) - HiveProject(total_sum=[$2], i_category=[$0], i_class=[$1], lochierarchy=[+(grouping($3, 1), grouping($3, 0))], rank_within_parent=[rank() OVER (PARTITION BY +(grouping($3, 1), grouping($3, 0)), CASE(=(grouping($3, 0), 0), $0, null) ORDER BY $2 DESC NULLS LAST ROWS BETWEEN 2147483647 FOLLOWING AND 2147483647 PRECEDING)], (tok_function when (= (tok_table_or_col lochierarchy) 0) (tok_table_or_col i_category))=[CASE(=(+(grouping($3, 1), grouping($3, 0)), 0), $0, null)]) - HiveProject($f0=[$0], $f1=[$1], $f2=[$2], GROUPING__ID=[$3]) - HiveAggregate(group=[{0, 1}], groups=[[{0, 1}, {0}, {}]], agg#0=[sum($2)], GROUPING__ID=[GROUPING__ID()]) - HiveProject($f0=[$2], $f1=[$1], $f2=[$5]) - HiveJoin(condition=[=($0, $4)], joinType=[inner], algorithm=[none], 
cost=[not available]) - HiveProject(i_item_sk=[$0], i_class=[$10], i_category=[$12]) - HiveTableScan(table=[[default, item]], table:alias=[item]) - HiveJoin(condition=[=($3, $0)], joinType=[inner], algorithm=[none], cost=[not available]) - HiveProject(ws_sold_date_sk=[$0], ws_item_sk=[$3], ws_net_paid=[$29]) - HiveFilter(condition=[IS NOT NULL($0)]) - HiveTableScan(table=[[default, web_sales]], table:alias=[web_sales]) - HiveProject(d_date_sk=[$0]) - HiveFilter(condition=[BETWEEN(false, $3, 1212, 1223)]) - HiveTableScan(table=[[default, date_dim]], table:alias=[d1]) - diff --git ql/src/test/results/clientpositive/repl_load_old_version.q.out ql/src/test/results/clientpositive/repl_load_old_version.q.out deleted file mode 100644 index 77116d1a91..0000000000 --- ql/src/test/results/clientpositive/repl_load_old_version.q.out +++ /dev/null @@ -1,85 +0,0 @@ -PREHOOK: query: REPL LOAD test_db from '../../data/files/repl_dump' with ('hive.exec.parallel'='false') -PREHOOK: type: REPLLOAD -POSTHOOK: query: REPL LOAD test_db from '../../data/files/repl_dump' with ('hive.exec.parallel'='false') -POSTHOOK: type: REPLLOAD -PREHOOK: query: use test_db -PREHOOK: type: SWITCHDATABASE -PREHOOK: Input: database:test_db -POSTHOOK: query: use test_db -POSTHOOK: type: SWITCHDATABASE -POSTHOOK: Input: database:test_db -PREHOOK: query: show tables -PREHOOK: type: SHOWTABLES -PREHOOK: Input: database:test_db -POSTHOOK: query: show tables -POSTHOOK: type: SHOWTABLES -POSTHOOK: Input: database:test_db -tbl -tbl1 -tbl2 -tbl3 -tbl4 -tbl5 -tbl6 -PREHOOK: query: select * from tbl1 order by fld -PREHOOK: type: QUERY -PREHOOK: Input: test_db@tbl1 -#### A masked pattern was here #### -POSTHOOK: query: select * from tbl1 order by fld -POSTHOOK: type: QUERY -POSTHOOK: Input: test_db@tbl1 -#### A masked pattern was here #### -1 -1 -1 -PREHOOK: query: select * from tbl2 order by fld -PREHOOK: type: QUERY -PREHOOK: Input: test_db@tbl2 -#### A masked pattern was here #### -POSTHOOK: query: select * from tbl2 order by fld -POSTHOOK: type: QUERY -POSTHOOK: Input: test_db@tbl2 -#### A masked pattern was here #### -1 -1 -PREHOOK: query: select * from tbl3 order by fld -PREHOOK: type: QUERY -PREHOOK: Input: test_db@tbl3 -#### A masked pattern was here #### -POSTHOOK: query: select * from tbl3 order by fld -POSTHOOK: type: QUERY -POSTHOOK: Input: test_db@tbl3 -#### A masked pattern was here #### -1 -PREHOOK: query: select * from tbl4 order by fld -PREHOOK: type: QUERY -PREHOOK: Input: test_db@tbl4 -#### A masked pattern was here #### -POSTHOOK: query: select * from tbl4 order by fld -POSTHOOK: type: QUERY -POSTHOOK: Input: test_db@tbl4 -#### A masked pattern was here #### -1 -PREHOOK: query: select * from tbl5 order by fld -PREHOOK: type: QUERY -PREHOOK: Input: test_db@tbl5 -#### A masked pattern was here #### -POSTHOOK: query: select * from tbl5 order by fld -POSTHOOK: type: QUERY -POSTHOOK: Input: test_db@tbl5 -#### A masked pattern was here #### -1 -PREHOOK: query: select * from tbl6 order by fld1 -PREHOOK: type: QUERY -PREHOOK: Input: test_db@tbl6 -PREHOOK: Input: test_db@tbl6@fld1=1 -PREHOOK: Input: test_db@tbl6@fld1=2 -#### A masked pattern was here #### -POSTHOOK: query: select * from tbl6 order by fld1 -POSTHOOK: type: QUERY -POSTHOOK: Input: test_db@tbl6 -POSTHOOK: Input: test_db@tbl6@fld1=1 -POSTHOOK: Input: test_db@tbl6@fld1=2 -#### A masked pattern was here #### -1 1 -1 2 diff --git ql/src/test/results/clientpositive/sample2.q.out ql/src/test/results/clientpositive/sample2.q.out deleted file mode 100644 index 
dd1e76ff2b..0000000000 --- ql/src/test/results/clientpositive/sample2.q.out +++ /dev/null @@ -1,675 +0,0 @@ -PREHOOK: query: CREATE TABLE dest1_n29(key INT, value STRING) STORED AS TEXTFILE -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@dest1_n29 -POSTHOOK: query: CREATE TABLE dest1_n29(key INT, value STRING) STORED AS TEXTFILE -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@dest1_n29 -PREHOOK: query: EXPLAIN EXTENDED -INSERT OVERWRITE TABLE dest1_n29 SELECT s.* -FROM srcbucket TABLESAMPLE (BUCKET 1 OUT OF 2) s -PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN EXTENDED -INSERT OVERWRITE TABLE dest1_n29 SELECT s.* -FROM srcbucket TABLESAMPLE (BUCKET 1 OUT OF 2) s -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5 - Stage-4 - Stage-0 depends on stages: Stage-4, Stage-3, Stage-6 - Stage-2 depends on stages: Stage-0 - Stage-3 - Stage-5 - Stage-6 depends on stages: Stage-5 - -STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: s - Statistics: Num rows: 1000 Data size: 10603 Basic stats: COMPLETE Column stats: NONE - GatherStats: false - Filter Operator - isSamplingPred: true - predicate: (((hash(key) & 2147483647) % 2) = 0) (type: boolean) - sampleDesc: BUCKET 1 OUT OF 2 - Statistics: Num rows: 500 Data size: 5301 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: key (type: int), value (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 5301 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - GlobalTableId: 1 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - Statistics: Num rows: 500 Data size: 5301 Basic stats: COMPLETE Column stats: NONE -#### A masked pattern was here #### - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}} - bucket_count -1 - bucketing_version 2 - column.name.delimiter , - columns key,value - columns.comments - columns.types int:string -#### A masked pattern was here #### - name default.dest1_n29 - numFiles 0 - numRows 0 - rawDataSize 0 - serialization.ddl struct dest1_n29 { i32 key, string value} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 0 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1_n29 - TotalFiles: 1 - GatherStats: true - MultiFileSpray: false - Select Operator - expressions: _col0 (type: int), _col1 (type: string) - outputColumnNames: key, value - Statistics: Num rows: 500 Data size: 5301 Basic stats: COMPLETE Column stats: NONE - Group By Operator - aggregations: compute_stats(key, 'hll'), compute_stats(value, 'hll') - mode: hash - outputColumnNames: _col0, _col1 - Statistics: Num rows: 1 Data size: 864 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - null sort order: - sort order: - Statistics: Num rows: 1 Data size: 864 Basic stats: COMPLETE Column stats: NONE - tag: -1 - value expressions: _col0 (type: struct), _col1 (type: struct) - auto parallelism: false - Path -> Alias: -#### A masked pattern was here #### - Path -> Partition: -#### A masked pattern was here #### - Partition - base file name: 
000000_0 - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}} - bucket_count 2 - bucket_field_name key - bucketing_version 2 - column.name.delimiter , - columns key,value - columns.comments - columns.types int:string -#### A masked pattern was here #### - name default.srcbucket - numFiles 2 - numRows 1000 - rawDataSize 10603 - serialization.ddl struct srcbucket { i32 key, string value} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 11603 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}} - bucket_count 2 - bucket_field_name key - bucketing_version 2 - column.name.delimiter , - columns key,value - columns.comments - columns.types int:string -#### A masked pattern was here #### - name default.srcbucket - numFiles 2 - numRows 1000 - rawDataSize 10603 - serialization.ddl struct srcbucket { i32 key, string value} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 11603 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.srcbucket - name: default.srcbucket - Truncated Path -> Alias: - /srcbucket/000000_0 [s] - Needs Tagging: false - Reduce Operator Tree: - Group By Operator - aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1) - mode: mergepartial - outputColumnNames: _col0, _col1 - Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - GlobalTableId: 0 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: NONE -#### A masked pattern was here #### - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - properties: - columns _col0,_col1 - columns.types struct:struct - escape.delim \ - hive.serialization.extend.additional.nesting.levels true - serialization.escape.crlf true - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - TotalFiles: 1 - GatherStats: false - MultiFileSpray: false - - Stage: Stage-7 - Conditional Operator - - Stage: Stage-4 - Move Operator - files: - hdfs directory: true -#### A masked pattern was here #### - - Stage: Stage-0 - Move Operator - tables: - replace: true -#### A masked pattern was here #### - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}} - bucket_count -1 - bucketing_version 2 - column.name.delimiter , - columns key,value - columns.comments - columns.types int:string -#### A masked pattern was here #### - name default.dest1_n29 - numFiles 0 - numRows 0 - rawDataSize 0 - serialization.ddl struct dest1_n29 { i32 key, string value} - serialization.format 1 - 
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 0 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1_n29 - - Stage: Stage-2 - Stats Work - Basic Stats Work: -#### A masked pattern was here #### - Column Stats Desc: - Columns: key, value - Column Types: int, string - Table: default.dest1_n29 - Is Table Level Stats: true - - Stage: Stage-3 - Map Reduce - Map Operator Tree: - TableScan - GatherStats: false - File Output Operator - compressed: false - GlobalTableId: 0 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}} - bucket_count -1 - bucketing_version 2 - column.name.delimiter , - columns key,value - columns.comments - columns.types int:string -#### A masked pattern was here #### - name default.dest1_n29 - numFiles 0 - numRows 0 - rawDataSize 0 - serialization.ddl struct dest1_n29 { i32 key, string value} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 0 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1_n29 - TotalFiles: 1 - GatherStats: false - MultiFileSpray: false - Path -> Alias: -#### A masked pattern was here #### - Path -> Partition: -#### A masked pattern was here #### - Partition - base file name: -ext-10002 - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}} - bucket_count -1 - bucketing_version 2 - column.name.delimiter , - columns key,value - columns.comments - columns.types int:string -#### A masked pattern was here #### - name default.dest1_n29 - numFiles 0 - numRows 0 - rawDataSize 0 - serialization.ddl struct dest1_n29 { i32 key, string value} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 0 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}} - bucket_count -1 - bucketing_version 2 - column.name.delimiter , - columns key,value - columns.comments - columns.types int:string -#### A masked pattern was here #### - name default.dest1_n29 - numFiles 0 - numRows 0 - rawDataSize 0 - serialization.ddl struct dest1_n29 { i32 key, string value} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 0 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1_n29 - name: default.dest1_n29 - Truncated Path -> Alias: -#### A masked pattern was here #### - - Stage: Stage-5 - Map Reduce - Map Operator Tree: - TableScan - GatherStats: false - File Output Operator - compressed: false - GlobalTableId: 0 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}} - bucket_count -1 - bucketing_version 2 - column.name.delimiter , - columns key,value - columns.comments - columns.types int:string -#### A masked pattern was here #### - name default.dest1_n29 - numFiles 0 - numRows 0 - rawDataSize 0 - serialization.ddl struct dest1_n29 { i32 key, string value} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 0 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1_n29 - TotalFiles: 1 - GatherStats: false - MultiFileSpray: false - Path -> Alias: -#### A masked pattern was here #### - Path -> Partition: -#### A masked pattern was here #### - Partition - base file name: -ext-10002 - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}} - bucket_count -1 - bucketing_version 2 - column.name.delimiter , - columns key,value - columns.comments - columns.types int:string -#### A masked pattern was here #### - name default.dest1_n29 - numFiles 0 - numRows 0 - rawDataSize 0 - serialization.ddl struct dest1_n29 { i32 key, string value} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 0 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}} - bucket_count -1 - bucketing_version 2 - column.name.delimiter , - columns key,value - columns.comments - columns.types int:string -#### A masked pattern was here #### - name default.dest1_n29 - numFiles 0 - numRows 0 - rawDataSize 0 - serialization.ddl struct dest1_n29 { i32 key, string value} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 0 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1_n29 - name: default.dest1_n29 - Truncated Path -> Alias: -#### A masked pattern was here #### - - Stage: Stage-6 - Move Operator - files: - hdfs directory: true -#### A masked pattern was here #### - -PREHOOK: query: INSERT OVERWRITE TABLE dest1_n29 SELECT s.* -FROM srcbucket TABLESAMPLE (BUCKET 1 OUT OF 2) s -PREHOOK: type: QUERY -PREHOOK: Input: default@srcbucket -PREHOOK: Output: default@dest1_n29 -POSTHOOK: query: INSERT OVERWRITE TABLE dest1_n29 SELECT s.* -FROM srcbucket TABLESAMPLE (BUCKET 1 OUT OF 2) s -POSTHOOK: type: QUERY -POSTHOOK: Input: default@srcbucket -POSTHOOK: Output: default@dest1_n29 -POSTHOOK: Lineage: dest1_n29.key SIMPLE [(srcbucket)s.FieldSchema(name:key, type:int, comment:null), ] -POSTHOOK: Lineage: dest1_n29.value SIMPLE [(srcbucket)s.FieldSchema(name:value, type:string, comment:null), ] -PREHOOK: query: SELECT dest1_n29.* FROM dest1_n29 -order by key, value -PREHOOK: type: QUERY -PREHOOK: Input: default@dest1_n29 -#### A masked pattern was here #### -POSTHOOK: query: SELECT dest1_n29.* FROM dest1_n29 -order by key, value -POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest1_n29 -#### A 
masked pattern was here #### -2 val_2 -2 val_3 -6 val_7 -6 val_7 -10 val_10 -10 val_11 -20 val_20 -20 val_21 -20 val_21 -30 val_30 -30 val_31 -40 val_41 -40 val_41 -46 val_47 -48 val_49 -48 val_49 -54 val_54 -58 val_58 -58 val_58 -58 val_59 -58 val_59 -60 val_61 -64 val_64 -68 val_69 -70 val_70 -70 val_70 -70 val_70 -70 val_71 -80 val_80 -80 val_81 -84 val_84 -84 val_84 -86 val_86 -86 val_87 -90 val_90 -90 val_90 -90 val_90 -96 val_96 -98 val_98 -98 val_98 -106 val_107 -110 val_111 -116 val_116 -116 val_117 -126 val_126 -126 val_127 -126 val_127 -132 val_133 -132 val_133 -134 val_134 -134 val_134 -134 val_135 -140 val_141 -146 val_146 -146 val_146 -156 val_156 -156 val_157 -156 val_157 -158 val_158 -162 val_162 -162 val_163 -164 val_164 -164 val_164 -164 val_165 -164 val_165 -178 val_178 -178 val_179 -178 val_179 -182 val_183 -184 val_185 -190 val_190 -202 val_202 -206 val_207 -206 val_207 -206 val_207 -208 val_208 -208 val_208 -208 val_208 -212 val_213 -214 val_214 -216 val_216 -216 val_216 -216 val_217 -226 val_226 -226 val_227 -226 val_227 -226 val_227 -226 val_227 -238 val_238 -238 val_238 -238 val_239 -240 val_241 -244 val_244 -244 val_245 -244 val_245 -244 val_245 -248 val_248 -248 val_249 -252 val_252 -252 val_253 -254 val_255 -256 val_256 -256 val_256 -256 val_257 -260 val_260 -260 val_261 -260 val_261 -266 val_266 -272 val_272 -272 val_272 -272 val_273 -276 val_277 -284 val_284 -284 val_285 -284 val_285 -286 val_286 -286 val_287 -292 val_292 -292 val_293 -292 val_293 -304 val_305 -308 val_308 -308 val_309 -308 val_309 -310 val_310 -310 val_311 -310 val_311 -310 val_311 -316 val_316 -316 val_316 -316 val_316 -324 val_325 -326 val_327 -332 val_332 -334 val_335 -336 val_336 -336 val_337 -338 val_338 -338 val_339 -342 val_342 -342 val_342 -342 val_343 -344 val_344 -344 val_344 -344 val_345 -348 val_348 -348 val_348 -348 val_348 -348 val_348 -348 val_348 -348 val_349 -352 val_353 -352 val_353 -360 val_360 -360 val_361 -362 val_362 -364 val_364 -364 val_365 -368 val_368 -378 val_378 -378 val_379 -384 val_384 -384 val_384 -384 val_384 -384 val_385 -384 val_385 -384 val_385 -386 val_386 -386 val_387 -386 val_387 -388 val_389 -392 val_392 -392 val_393 -392 val_393 -394 val_394 -396 val_396 -396 val_396 -396 val_396 -402 val_402 -402 val_403 -402 val_403 -402 val_403 -404 val_404 -404 val_404 -404 val_405 -404 val_405 -404 val_405 -408 val_409 -408 val_409 -410 val_411 -414 val_414 -414 val_414 -414 val_415 -426 val_427 -428 val_429 -430 val_430 -430 val_430 -430 val_430 -430 val_431 -432 val_432 -432 val_433 -440 val_441 -440 val_441 -444 val_444 -446 val_446 -446 val_447 -446 val_447 -452 val_452 -454 val_454 -454 val_454 -454 val_454 -454 val_455 -454 val_455 -458 val_458 -458 val_458 -466 val_466 -466 val_466 -466 val_466 -472 val_472 -474 val_475 -474 val_475 -476 val_477 -476 val_477 -478 val_478 -478 val_478 -478 val_479 -478 val_479 -480 val_480 -480 val_480 -480 val_480 -480 val_481 -480 val_481 -482 val_482 -482 val_483 -484 val_484 -484 val_485 -488 val_489 -490 val_490 -490 val_491 -498 val_498 -498 val_498 -498 val_498 diff --git ql/src/test/results/clientpositive/sample4.q.out ql/src/test/results/clientpositive/sample4.q.out deleted file mode 100644 index 39ea73749b..0000000000 --- ql/src/test/results/clientpositive/sample4.q.out +++ /dev/null @@ -1,675 +0,0 @@ -PREHOOK: query: CREATE TABLE dest1_n118(key INT, value STRING) STORED AS TEXTFILE -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@dest1_n118 -POSTHOOK: query: CREATE TABLE 
dest1_n118(key INT, value STRING) STORED AS TEXTFILE -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@dest1_n118 -PREHOOK: query: EXPLAIN EXTENDED -INSERT OVERWRITE TABLE dest1_n118 SELECT s.* -FROM srcbucket TABLESAMPLE (BUCKET 1 OUT OF 2 on key) s -PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN EXTENDED -INSERT OVERWRITE TABLE dest1_n118 SELECT s.* -FROM srcbucket TABLESAMPLE (BUCKET 1 OUT OF 2 on key) s -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5 - Stage-4 - Stage-0 depends on stages: Stage-4, Stage-3, Stage-6 - Stage-2 depends on stages: Stage-0 - Stage-3 - Stage-5 - Stage-6 depends on stages: Stage-5 - -STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: s - Statistics: Num rows: 1000 Data size: 10603 Basic stats: COMPLETE Column stats: NONE - GatherStats: false - Filter Operator - isSamplingPred: true - predicate: (((hash(key) & 2147483647) % 2) = 0) (type: boolean) - sampleDesc: BUCKET 1 OUT OF 2 - Statistics: Num rows: 500 Data size: 5301 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: key (type: int), value (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 5301 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - GlobalTableId: 1 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - Statistics: Num rows: 500 Data size: 5301 Basic stats: COMPLETE Column stats: NONE -#### A masked pattern was here #### - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}} - bucket_count -1 - bucketing_version 2 - column.name.delimiter , - columns key,value - columns.comments - columns.types int:string -#### A masked pattern was here #### - name default.dest1_n118 - numFiles 0 - numRows 0 - rawDataSize 0 - serialization.ddl struct dest1_n118 { i32 key, string value} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 0 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1_n118 - TotalFiles: 1 - GatherStats: true - MultiFileSpray: false - Select Operator - expressions: _col0 (type: int), _col1 (type: string) - outputColumnNames: key, value - Statistics: Num rows: 500 Data size: 5301 Basic stats: COMPLETE Column stats: NONE - Group By Operator - aggregations: compute_stats(key, 'hll'), compute_stats(value, 'hll') - mode: hash - outputColumnNames: _col0, _col1 - Statistics: Num rows: 1 Data size: 864 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - null sort order: - sort order: - Statistics: Num rows: 1 Data size: 864 Basic stats: COMPLETE Column stats: NONE - tag: -1 - value expressions: _col0 (type: struct), _col1 (type: struct) - auto parallelism: false - Path -> Alias: -#### A masked pattern was here #### - Path -> Partition: -#### A masked pattern was here #### - Partition - base file name: 000000_0 - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}} - bucket_count 2 - bucket_field_name key - 
bucketing_version 2 - column.name.delimiter , - columns key,value - columns.comments - columns.types int:string -#### A masked pattern was here #### - name default.srcbucket - numFiles 2 - numRows 1000 - rawDataSize 10603 - serialization.ddl struct srcbucket { i32 key, string value} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 11603 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}} - bucket_count 2 - bucket_field_name key - bucketing_version 2 - column.name.delimiter , - columns key,value - columns.comments - columns.types int:string -#### A masked pattern was here #### - name default.srcbucket - numFiles 2 - numRows 1000 - rawDataSize 10603 - serialization.ddl struct srcbucket { i32 key, string value} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 11603 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.srcbucket - name: default.srcbucket - Truncated Path -> Alias: - /srcbucket/000000_0 [s] - Needs Tagging: false - Reduce Operator Tree: - Group By Operator - aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1) - mode: mergepartial - outputColumnNames: _col0, _col1 - Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - GlobalTableId: 0 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: NONE -#### A masked pattern was here #### - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - properties: - columns _col0,_col1 - columns.types struct:struct - escape.delim \ - hive.serialization.extend.additional.nesting.levels true - serialization.escape.crlf true - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - TotalFiles: 1 - GatherStats: false - MultiFileSpray: false - - Stage: Stage-7 - Conditional Operator - - Stage: Stage-4 - Move Operator - files: - hdfs directory: true -#### A masked pattern was here #### - - Stage: Stage-0 - Move Operator - tables: - replace: true -#### A masked pattern was here #### - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}} - bucket_count -1 - bucketing_version 2 - column.name.delimiter , - columns key,value - columns.comments - columns.types int:string -#### A masked pattern was here #### - name default.dest1_n118 - numFiles 0 - numRows 0 - rawDataSize 0 - serialization.ddl struct dest1_n118 { i32 key, string value} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 0 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1_n118 - - Stage: Stage-2 - Stats Work - Basic Stats Work: -#### A masked pattern was here #### - 
Column Stats Desc: - Columns: key, value - Column Types: int, string - Table: default.dest1_n118 - Is Table Level Stats: true - - Stage: Stage-3 - Map Reduce - Map Operator Tree: - TableScan - GatherStats: false - File Output Operator - compressed: false - GlobalTableId: 0 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}} - bucket_count -1 - bucketing_version 2 - column.name.delimiter , - columns key,value - columns.comments - columns.types int:string -#### A masked pattern was here #### - name default.dest1_n118 - numFiles 0 - numRows 0 - rawDataSize 0 - serialization.ddl struct dest1_n118 { i32 key, string value} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 0 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1_n118 - TotalFiles: 1 - GatherStats: false - MultiFileSpray: false - Path -> Alias: -#### A masked pattern was here #### - Path -> Partition: -#### A masked pattern was here #### - Partition - base file name: -ext-10002 - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}} - bucket_count -1 - bucketing_version 2 - column.name.delimiter , - columns key,value - columns.comments - columns.types int:string -#### A masked pattern was here #### - name default.dest1_n118 - numFiles 0 - numRows 0 - rawDataSize 0 - serialization.ddl struct dest1_n118 { i32 key, string value} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 0 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}} - bucket_count -1 - bucketing_version 2 - column.name.delimiter , - columns key,value - columns.comments - columns.types int:string -#### A masked pattern was here #### - name default.dest1_n118 - numFiles 0 - numRows 0 - rawDataSize 0 - serialization.ddl struct dest1_n118 { i32 key, string value} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 0 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1_n118 - name: default.dest1_n118 - Truncated Path -> Alias: -#### A masked pattern was here #### - - Stage: Stage-5 - Map Reduce - Map Operator Tree: - TableScan - GatherStats: false - File Output Operator - compressed: false - GlobalTableId: 0 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}} - bucket_count -1 - bucketing_version 2 - column.name.delimiter , - columns key,value - columns.comments - columns.types int:string -#### A masked pattern 
was here #### - name default.dest1_n118 - numFiles 0 - numRows 0 - rawDataSize 0 - serialization.ddl struct dest1_n118 { i32 key, string value} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 0 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1_n118 - TotalFiles: 1 - GatherStats: false - MultiFileSpray: false - Path -> Alias: -#### A masked pattern was here #### - Path -> Partition: -#### A masked pattern was here #### - Partition - base file name: -ext-10002 - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}} - bucket_count -1 - bucketing_version 2 - column.name.delimiter , - columns key,value - columns.comments - columns.types int:string -#### A masked pattern was here #### - name default.dest1_n118 - numFiles 0 - numRows 0 - rawDataSize 0 - serialization.ddl struct dest1_n118 { i32 key, string value} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 0 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}} - bucket_count -1 - bucketing_version 2 - column.name.delimiter , - columns key,value - columns.comments - columns.types int:string -#### A masked pattern was here #### - name default.dest1_n118 - numFiles 0 - numRows 0 - rawDataSize 0 - serialization.ddl struct dest1_n118 { i32 key, string value} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 0 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1_n118 - name: default.dest1_n118 - Truncated Path -> Alias: -#### A masked pattern was here #### - - Stage: Stage-6 - Move Operator - files: - hdfs directory: true -#### A masked pattern was here #### - -PREHOOK: query: INSERT OVERWRITE TABLE dest1_n118 SELECT s.* -FROM srcbucket TABLESAMPLE (BUCKET 1 OUT OF 2 on key) s -PREHOOK: type: QUERY -PREHOOK: Input: default@srcbucket -PREHOOK: Output: default@dest1_n118 -POSTHOOK: query: INSERT OVERWRITE TABLE dest1_n118 SELECT s.* -FROM srcbucket TABLESAMPLE (BUCKET 1 OUT OF 2 on key) s -POSTHOOK: type: QUERY -POSTHOOK: Input: default@srcbucket -POSTHOOK: Output: default@dest1_n118 -POSTHOOK: Lineage: dest1_n118.key SIMPLE [(srcbucket)s.FieldSchema(name:key, type:int, comment:null), ] -POSTHOOK: Lineage: dest1_n118.value SIMPLE [(srcbucket)s.FieldSchema(name:value, type:string, comment:null), ] -PREHOOK: query: SELECT dest1_n118.* FROM dest1_n118 -order by key, value -PREHOOK: type: QUERY -PREHOOK: Input: default@dest1_n118 -#### A masked pattern was here #### -POSTHOOK: query: SELECT dest1_n118.* FROM dest1_n118 -order by key, value -POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest1_n118 -#### A masked pattern was here #### -2 val_2 -2 val_3 -6 val_7 -6 val_7 -10 val_10 -10 val_11 -20 val_20 -20 val_21 -20 val_21 -30 val_30 -30 val_31 -40 val_41 -40 val_41 -46 val_47 -48 val_49 -48 val_49 -54 val_54 -58 val_58 -58 val_58 -58 val_59 -58 val_59 -60 val_61 -64 val_64 -68 val_69 
-70 val_70 -70 val_70 -70 val_70 -70 val_71 -80 val_80 -80 val_81 -84 val_84 -84 val_84 -86 val_86 -86 val_87 -90 val_90 -90 val_90 -90 val_90 -96 val_96 -98 val_98 -98 val_98 -106 val_107 -110 val_111 -116 val_116 -116 val_117 -126 val_126 -126 val_127 -126 val_127 -132 val_133 -132 val_133 -134 val_134 -134 val_134 -134 val_135 -140 val_141 -146 val_146 -146 val_146 -156 val_156 -156 val_157 -156 val_157 -158 val_158 -162 val_162 -162 val_163 -164 val_164 -164 val_164 -164 val_165 -164 val_165 -178 val_178 -178 val_179 -178 val_179 -182 val_183 -184 val_185 -190 val_190 -202 val_202 -206 val_207 -206 val_207 -206 val_207 -208 val_208 -208 val_208 -208 val_208 -212 val_213 -214 val_214 -216 val_216 -216 val_216 -216 val_217 -226 val_226 -226 val_227 -226 val_227 -226 val_227 -226 val_227 -238 val_238 -238 val_238 -238 val_239 -240 val_241 -244 val_244 -244 val_245 -244 val_245 -244 val_245 -248 val_248 -248 val_249 -252 val_252 -252 val_253 -254 val_255 -256 val_256 -256 val_256 -256 val_257 -260 val_260 -260 val_261 -260 val_261 -266 val_266 -272 val_272 -272 val_272 -272 val_273 -276 val_277 -284 val_284 -284 val_285 -284 val_285 -286 val_286 -286 val_287 -292 val_292 -292 val_293 -292 val_293 -304 val_305 -308 val_308 -308 val_309 -308 val_309 -310 val_310 -310 val_311 -310 val_311 -310 val_311 -316 val_316 -316 val_316 -316 val_316 -324 val_325 -326 val_327 -332 val_332 -334 val_335 -336 val_336 -336 val_337 -338 val_338 -338 val_339 -342 val_342 -342 val_342 -342 val_343 -344 val_344 -344 val_344 -344 val_345 -348 val_348 -348 val_348 -348 val_348 -348 val_348 -348 val_348 -348 val_349 -352 val_353 -352 val_353 -360 val_360 -360 val_361 -362 val_362 -364 val_364 -364 val_365 -368 val_368 -378 val_378 -378 val_379 -384 val_384 -384 val_384 -384 val_384 -384 val_385 -384 val_385 -384 val_385 -386 val_386 -386 val_387 -386 val_387 -388 val_389 -392 val_392 -392 val_393 -392 val_393 -394 val_394 -396 val_396 -396 val_396 -396 val_396 -402 val_402 -402 val_403 -402 val_403 -402 val_403 -404 val_404 -404 val_404 -404 val_405 -404 val_405 -404 val_405 -408 val_409 -408 val_409 -410 val_411 -414 val_414 -414 val_414 -414 val_415 -426 val_427 -428 val_429 -430 val_430 -430 val_430 -430 val_430 -430 val_431 -432 val_432 -432 val_433 -440 val_441 -440 val_441 -444 val_444 -446 val_446 -446 val_447 -446 val_447 -452 val_452 -454 val_454 -454 val_454 -454 val_454 -454 val_455 -454 val_455 -458 val_458 -458 val_458 -466 val_466 -466 val_466 -466 val_466 -472 val_472 -474 val_475 -474 val_475 -476 val_477 -476 val_477 -478 val_478 -478 val_478 -478 val_479 -478 val_479 -480 val_480 -480 val_480 -480 val_480 -480 val_481 -480 val_481 -482 val_482 -482 val_483 -484 val_484 -484 val_485 -488 val_489 -490 val_490 -490 val_491 -498 val_498 -498 val_498 -498 val_498 diff --git ql/src/test/results/clientpositive/sample_islocalmode_hook_hadoop20.q.out ql/src/test/results/clientpositive/sample_islocalmode_hook_hadoop20.q.out deleted file mode 100644 index 8eb73010e8..0000000000 --- ql/src/test/results/clientpositive/sample_islocalmode_hook_hadoop20.q.out +++ /dev/null @@ -1,98 +0,0 @@ -PREHOOK: query: USE default -PREHOOK: type: SWITCHDATABASE -POSTHOOK: query: USE default -POSTHOOK: type: SWITCHDATABASE -PREHOOK: query: -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.20, 0.20S) --- This test sets mapred.max.split.size=300 and hive.merge.smallfiles.avgsize=1 --- in an attempt to force the generation of multiple splits and multiple output files. 
--- However, Hadoop 0.20 is incapable of generating splits smaller than the block size --- when using CombineFileInputFormat, so only one split is generated. This has a --- significant impact on the results of the TABLESAMPLE(x PERCENT). This issue was --- fixed in MAPREDUCE-2046 which is included in 0.22. - --- create file inputs -create table sih_i_part (key int, value string) partitioned by (p string) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -POSTHOOK: query: -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.20, 0.20S) --- This test sets mapred.max.split.size=300 and hive.merge.smallfiles.avgsize=1 --- in an attempt to force the generation of multiple splits and multiple output files. --- However, Hadoop 0.20 is incapable of generating splits smaller than the block size --- when using CombineFileInputFormat, so only one split is generated. This has a --- significant impact on the results of the TABLESAMPLE(x PERCENT). This issue was --- fixed in MAPREDUCE-2046 which is included in 0.22. - --- create file inputs -create table sih_i_part (key int, value string) partitioned by (p string) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@sih_i_part -PREHOOK: query: insert overwrite table sih_i_part partition (p='1') select key, value from src -PREHOOK: type: QUERY -PREHOOK: Input: default@src -PREHOOK: Output: default@sih_i_part@p=1 -POSTHOOK: query: insert overwrite table sih_i_part partition (p='1') select key, value from src -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src -POSTHOOK: Output: default@sih_i_part@p=1 -POSTHOOK: Lineage: sih_i_part PARTITION(p=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: sih_i_part PARTITION(p=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: insert overwrite table sih_i_part partition (p='2') select key+10000, value from src -PREHOOK: type: QUERY -PREHOOK: Input: default@src -PREHOOK: Output: default@sih_i_part@p=2 -POSTHOOK: query: insert overwrite table sih_i_part partition (p='2') select key+10000, value from src -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src -POSTHOOK: Output: default@sih_i_part@p=2 -POSTHOOK: Lineage: sih_i_part PARTITION(p=2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: sih_i_part PARTITION(p=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: insert overwrite table sih_i_part partition (p='3') select key+20000, value from src -PREHOOK: type: QUERY -PREHOOK: Input: default@src -PREHOOK: Output: default@sih_i_part@p=3 -POSTHOOK: query: insert overwrite table sih_i_part partition (p='3') select key+20000, value from src -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src -POSTHOOK: Output: default@sih_i_part@p=3 -POSTHOOK: Lineage: sih_i_part PARTITION(p=3).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: sih_i_part PARTITION(p=3).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: create table sih_src as select key, value from sih_i_part order by key, value -PREHOOK: type: CREATETABLE_AS_SELECT -PREHOOK: Input: default@sih_i_part -PREHOOK: Input: default@sih_i_part@p=1 -PREHOOK: Input: default@sih_i_part@p=2 -PREHOOK: Input: default@sih_i_part@p=3 -POSTHOOK: query: create table sih_src as select key, value from sih_i_part order by key, value -POSTHOOK: type: 
CREATETABLE_AS_SELECT -POSTHOOK: Input: default@sih_i_part -POSTHOOK: Input: default@sih_i_part@p=1 -POSTHOOK: Input: default@sih_i_part@p=2 -POSTHOOK: Input: default@sih_i_part@p=3 -POSTHOOK: Output: default@sih_src -PREHOOK: query: create table sih_src2 as select key, value from sih_src order by key, value -PREHOOK: type: CREATETABLE_AS_SELECT -PREHOOK: Input: default@sih_src -POSTHOOK: query: create table sih_src2 as select key, value from sih_src order by key, value -POSTHOOK: type: CREATETABLE_AS_SELECT -POSTHOOK: Input: default@sih_src -POSTHOOK: Output: default@sih_src2 -PREHOOK: query: -- Sample split, running locally limited by num tasks -select count(1) from sih_src tablesample(1 percent) -PREHOOK: type: QUERY -PREHOOK: Input: default@sih_src -#### A masked pattern was here #### -1500 -PREHOOK: query: -- sample two tables -select count(1) from sih_src tablesample(1 percent)a join sih_src2 tablesample(1 percent)b on a.key = b.key -PREHOOK: type: QUERY -PREHOOK: Input: default@sih_src -PREHOOK: Input: default@sih_src2 -#### A masked pattern was here #### -3084 -PREHOOK: query: -- sample split, running locally limited by max bytes -select count(1) from sih_src tablesample(1 percent) -PREHOOK: type: QUERY -PREHOOK: Input: default@sih_src -#### A masked pattern was here #### -1500 diff --git ql/src/test/results/clientpositive/spark/bucketizedhiveinputformat.q.out ql/src/test/results/clientpositive/spark/bucketizedhiveinputformat.q.out deleted file mode 100644 index 12d932553e..0000000000 --- ql/src/test/results/clientpositive/spark/bucketizedhiveinputformat.q.out +++ /dev/null @@ -1,104 +0,0 @@ -PREHOOK: query: CREATE TABLE T1(name STRING) STORED AS TEXTFILE -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@T1 -POSTHOOK: query: CREATE TABLE T1(name STRING) STORED AS TEXTFILE -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@T1 -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE T1 -PREHOOK: type: LOAD -#### A masked pattern was here #### -PREHOOK: Output: default@t1 -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE T1 -POSTHOOK: type: LOAD -#### A masked pattern was here #### -POSTHOOK: Output: default@t1 -PREHOOK: query: CREATE TABLE T2(name STRING) STORED AS SEQUENCEFILE -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@T2 -POSTHOOK: query: CREATE TABLE T2(name STRING) STORED AS SEQUENCEFILE -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@T2 -Warning: Shuffle Join JOIN[9][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Work 'Reducer 2' is a cross product -PREHOOK: query: INSERT OVERWRITE TABLE T2 SELECT * FROM ( -SELECT tmp1.name as name FROM ( - SELECT name, 'MMM' AS n FROM T1) tmp1 - JOIN (SELECT 'MMM' AS n FROM T1) tmp2 - JOIN (SELECT 'MMM' AS n FROM T1) tmp3 - ON tmp1.n = tmp2.n AND tmp1.n = tmp3.n) ttt LIMIT 5000000 -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Output: default@t2 -POSTHOOK: query: INSERT OVERWRITE TABLE T2 SELECT * FROM ( -SELECT tmp1.name as name FROM ( - SELECT name, 'MMM' AS n FROM T1) tmp1 - JOIN (SELECT 'MMM' AS n FROM T1) tmp2 - JOIN (SELECT 'MMM' AS n FROM T1) tmp3 - ON tmp1.n = tmp2.n AND tmp1.n = tmp3.n) ttt LIMIT 5000000 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Output: default@t2 -POSTHOOK: Lineage: t2.name SIMPLE [(t1)t1.FieldSchema(name:name, type:string, comment:null), ] -PREHOOK: query: CREATE TABLE 
T3(name STRING) STORED AS TEXTFILE -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@T3 -POSTHOOK: query: CREATE TABLE T3(name STRING) STORED AS TEXTFILE -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@T3 -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE T3 -PREHOOK: type: LOAD -#### A masked pattern was here #### -PREHOOK: Output: default@t3 -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE T3 -POSTHOOK: type: LOAD -#### A masked pattern was here #### -POSTHOOK: Output: default@t3 -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv2.txt' INTO TABLE T3 -PREHOOK: type: LOAD -#### A masked pattern was here #### -PREHOOK: Output: default@t3 -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv2.txt' INTO TABLE T3 -POSTHOOK: type: LOAD -#### A masked pattern was here #### -POSTHOOK: Output: default@t3 -PREHOOK: query: SELECT COUNT(1) FROM T2 -PREHOOK: type: QUERY -PREHOOK: Input: default@t2 -PREHOOK: Output: hdfs://### HDFS PATH ### -POSTHOOK: query: SELECT COUNT(1) FROM T2 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t2 -POSTHOOK: Output: hdfs://### HDFS PATH ### -5000000 -PREHOOK: query: SELECT COUNT(1) FROM T3 -PREHOOK: type: QUERY -PREHOOK: Input: default@t3 -PREHOOK: Output: hdfs://### HDFS PATH ### -POSTHOOK: query: SELECT COUNT(1) FROM T3 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t3 -POSTHOOK: Output: hdfs://### HDFS PATH ### -1000 -PREHOOK: query: SELECT COUNT(1) FROM T2 -PREHOOK: type: QUERY -PREHOOK: Input: default@t2 -PREHOOK: Output: hdfs://### HDFS PATH ### -POSTHOOK: query: SELECT COUNT(1) FROM T2 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t2 -POSTHOOK: Output: hdfs://### HDFS PATH ### -5000000 -PREHOOK: query: SELECT COUNT(1) FROM T3 -PREHOOK: type: QUERY -PREHOOK: Input: default@t3 -PREHOOK: Output: hdfs://### HDFS PATH ### -POSTHOOK: query: SELECT COUNT(1) FROM T3 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t3 -POSTHOOK: Output: hdfs://### HDFS PATH ### -1000 diff --git ql/src/test/results/clientpositive/tez/acid_vectorization_original.q.out ql/src/test/results/clientpositive/tez/acid_vectorization_original.q.out deleted file mode 100644 index 5fb57623c2..0000000000 --- ql/src/test/results/clientpositive/tez/acid_vectorization_original.q.out +++ /dev/null @@ -1,740 +0,0 @@ -PREHOOK: query: CREATE TEMPORARY FUNCTION runWorker AS 'org.apache.hadoop.hive.ql.udf.UDFRunWorker' -PREHOOK: type: CREATEFUNCTION -PREHOOK: Output: runworker -POSTHOOK: query: CREATE TEMPORARY FUNCTION runWorker AS 'org.apache.hadoop.hive.ql.udf.UDFRunWorker' -POSTHOOK: type: CREATEFUNCTION -POSTHOOK: Output: runworker -PREHOOK: query: create table mydual(a int) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@mydual -POSTHOOK: query: create table mydual(a int) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@mydual -PREHOOK: query: insert into mydual values(1) -PREHOOK: type: QUERY -PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@mydual -POSTHOOK: query: insert into mydual values(1) -POSTHOOK: type: QUERY -POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@mydual -POSTHOOK: Lineage: mydual.a SCRIPT [] -PREHOOK: query: CREATE TABLE over10k_n2(t tinyint, - si smallint, - i int, - b bigint, - f float, - d double, - bo boolean, - s string, - ts timestamp, - `dec` decimal(4,2), - bin binary) -ROW FORMAT 
DELIMITED FIELDS TERMINATED BY '|' -STORED AS TEXTFILE -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@over10k_n2 -POSTHOOK: query: CREATE TABLE over10k_n2(t tinyint, - si smallint, - i int, - b bigint, - f float, - d double, - bo boolean, - s string, - ts timestamp, - `dec` decimal(4,2), - bin binary) -ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' -STORED AS TEXTFILE -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@over10k_n2 -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/over1k' OVERWRITE INTO TABLE over10k_n2 -PREHOOK: type: LOAD -#### A masked pattern was here #### -PREHOOK: Output: default@over10k_n2 -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/over1k' OVERWRITE INTO TABLE over10k_n2 -POSTHOOK: type: LOAD -#### A masked pattern was here #### -POSTHOOK: Output: default@over10k_n2 -PREHOOK: query: CREATE TABLE over10k_orc_bucketed(t tinyint, - si smallint, - i int, - b bigint, - f float, - d double, - bo boolean, - s string, - ts timestamp, - `dec` decimal(4,2), - bin binary) CLUSTERED BY(si) INTO 4 BUCKETS STORED AS ORC -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@over10k_orc_bucketed -POSTHOOK: query: CREATE TABLE over10k_orc_bucketed(t tinyint, - si smallint, - i int, - b bigint, - f float, - d double, - bo boolean, - s string, - ts timestamp, - `dec` decimal(4,2), - bin binary) CLUSTERED BY(si) INTO 4 BUCKETS STORED AS ORC -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@over10k_orc_bucketed -PREHOOK: query: select distinct si, si%4 from over10k_n2 order by si -PREHOOK: type: QUERY -PREHOOK: Input: default@over10k_n2 -PREHOOK: Output: hdfs://### HDFS PATH ### -POSTHOOK: query: select distinct si, si%4 from over10k_n2 order by si -POSTHOOK: type: QUERY -POSTHOOK: Input: default@over10k_n2 -POSTHOOK: Output: hdfs://### HDFS PATH ### -NULL NULL -256 0 -257 1 -258 2 -259 3 -260 0 -261 1 -262 2 -263 3 -264 0 -265 1 -266 2 -267 3 -268 0 -269 1 -270 2 -271 3 -272 0 -273 1 -274 2 -275 3 -276 0 -277 1 -278 2 -279 3 -280 0 -281 1 -282 2 -283 3 -284 0 -285 1 -286 2 -287 3 -288 0 -289 1 -290 2 -291 3 -292 0 -293 1 -294 2 -295 3 -296 0 -297 1 -298 2 -299 3 -300 0 -301 1 -302 2 -303 3 -304 0 -305 1 -306 2 -307 3 -308 0 -309 1 -310 2 -311 3 -312 0 -313 1 -314 2 -315 3 -316 0 -317 1 -318 2 -319 3 -320 0 -321 1 -322 2 -323 3 -324 0 -325 1 -326 2 -327 3 -328 0 -329 1 -330 2 -331 3 -332 0 -333 1 -334 2 -335 3 -336 0 -337 1 -338 2 -339 3 -340 0 -341 1 -342 2 -343 3 -344 0 -345 1 -346 2 -347 3 -348 0 -349 1 -350 2 -351 3 -352 0 -353 1 -354 2 -355 3 -356 0 -357 1 -358 2 -359 3 -360 0 -361 1 -362 2 -363 3 -364 0 -365 1 -366 2 -367 3 -368 0 -370 2 -371 3 -372 0 -373 1 -374 2 -375 3 -376 0 -377 1 -378 2 -379 3 -380 0 -381 1 -382 2 -383 3 -384 0 -385 1 -386 2 -387 3 -388 0 -389 1 -390 2 -391 3 -392 0 -393 1 -394 2 -395 3 -396 0 -397 1 -398 2 -399 3 -400 0 -401 1 -402 2 -403 3 -404 0 -405 1 -406 2 -407 3 -408 0 -409 1 -410 2 -411 3 -413 1 -414 2 -415 3 -417 1 -418 2 -419 3 -420 0 -421 1 -422 2 -423 3 -424 0 -425 1 -426 2 -427 3 -428 0 -429 1 -430 2 -431 3 -432 0 -433 1 -434 2 -435 3 -436 0 -437 1 -438 2 -439 3 -440 0 -441 1 -442 2 -443 3 -444 0 -445 1 -446 2 -447 3 -448 0 -449 1 -450 2 -451 3 -452 0 -453 1 -454 2 -455 3 -456 0 -457 1 -458 2 -459 3 -460 0 -461 1 -462 2 -463 3 -464 0 -465 1 -466 2 -467 3 -468 0 -469 1 -471 3 -472 0 -473 1 -474 2 -475 3 -476 0 -477 1 -478 2 -479 3 -480 0 -481 1 -482 2 -483 3 -484 0 -485 
1 -486 2 -487 3 -488 0 -489 1 -490 2 -491 3 -492 0 -493 1 -494 2 -495 3 -496 0 -497 1 -498 2 -499 3 -500 0 -501 1 -502 2 -503 3 -504 0 -505 1 -506 2 -507 3 -508 0 -509 1 -510 2 -511 3 -PREHOOK: query: insert into over10k_orc_bucketed select * from over10k_n2 -PREHOOK: type: QUERY -PREHOOK: Input: default@over10k_n2 -PREHOOK: Output: default@over10k_orc_bucketed -POSTHOOK: query: insert into over10k_orc_bucketed select * from over10k_n2 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@over10k_n2 -POSTHOOK: Output: default@over10k_orc_bucketed -POSTHOOK: Lineage: over10k_orc_bucketed.b SIMPLE [(over10k_n2)over10k_n2.FieldSchema(name:b, type:bigint, comment:null), ] -POSTHOOK: Lineage: over10k_orc_bucketed.bin SIMPLE [(over10k_n2)over10k_n2.FieldSchema(name:bin, type:binary, comment:null), ] -POSTHOOK: Lineage: over10k_orc_bucketed.bo SIMPLE [(over10k_n2)over10k_n2.FieldSchema(name:bo, type:boolean, comment:null), ] -POSTHOOK: Lineage: over10k_orc_bucketed.d SIMPLE [(over10k_n2)over10k_n2.FieldSchema(name:d, type:double, comment:null), ] -POSTHOOK: Lineage: over10k_orc_bucketed.dec SIMPLE [(over10k_n2)over10k_n2.FieldSchema(name:dec, type:decimal(4,2), comment:null), ] -POSTHOOK: Lineage: over10k_orc_bucketed.f SIMPLE [(over10k_n2)over10k_n2.FieldSchema(name:f, type:float, comment:null), ] -POSTHOOK: Lineage: over10k_orc_bucketed.i SIMPLE [(over10k_n2)over10k_n2.FieldSchema(name:i, type:int, comment:null), ] -POSTHOOK: Lineage: over10k_orc_bucketed.s SIMPLE [(over10k_n2)over10k_n2.FieldSchema(name:s, type:string, comment:null), ] -POSTHOOK: Lineage: over10k_orc_bucketed.si SIMPLE [(over10k_n2)over10k_n2.FieldSchema(name:si, type:smallint, comment:null), ] -POSTHOOK: Lineage: over10k_orc_bucketed.t SIMPLE [(over10k_n2)over10k_n2.FieldSchema(name:t, type:tinyint, comment:null), ] -POSTHOOK: Lineage: over10k_orc_bucketed.ts SIMPLE [(over10k_n2)over10k_n2.FieldSchema(name:ts, type:timestamp, comment:null), ] -Found 4 items --rw-rw-rw- 3 ### USER ### ### GROUP ### 8903 ### HDFS DATE ### hdfs://### HDFS PATH ### --rw-rw-rw- 3 ### USER ### ### GROUP ### 7698 ### HDFS DATE ### hdfs://### HDFS PATH ### --rw-rw-rw- 3 ### USER ### ### GROUP ### 7273 ### HDFS DATE ### hdfs://### HDFS PATH ### --rw-rw-rw- 3 ### USER ### ### GROUP ### 7180 ### HDFS DATE ### hdfs://### HDFS PATH ### -PREHOOK: query: insert into over10k_orc_bucketed select * from over10k_n2 -PREHOOK: type: QUERY -PREHOOK: Input: default@over10k_n2 -PREHOOK: Output: default@over10k_orc_bucketed -POSTHOOK: query: insert into over10k_orc_bucketed select * from over10k_n2 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@over10k_n2 -POSTHOOK: Output: default@over10k_orc_bucketed -POSTHOOK: Lineage: over10k_orc_bucketed.b SIMPLE [(over10k_n2)over10k_n2.FieldSchema(name:b, type:bigint, comment:null), ] -POSTHOOK: Lineage: over10k_orc_bucketed.bin SIMPLE [(over10k_n2)over10k_n2.FieldSchema(name:bin, type:binary, comment:null), ] -POSTHOOK: Lineage: over10k_orc_bucketed.bo SIMPLE [(over10k_n2)over10k_n2.FieldSchema(name:bo, type:boolean, comment:null), ] -POSTHOOK: Lineage: over10k_orc_bucketed.d SIMPLE [(over10k_n2)over10k_n2.FieldSchema(name:d, type:double, comment:null), ] -POSTHOOK: Lineage: over10k_orc_bucketed.dec SIMPLE [(over10k_n2)over10k_n2.FieldSchema(name:dec, type:decimal(4,2), comment:null), ] -POSTHOOK: Lineage: over10k_orc_bucketed.f SIMPLE [(over10k_n2)over10k_n2.FieldSchema(name:f, type:float, comment:null), ] -POSTHOOK: Lineage: over10k_orc_bucketed.i SIMPLE [(over10k_n2)over10k_n2.FieldSchema(name:i, type:int, comment:null), ] 
-POSTHOOK: Lineage: over10k_orc_bucketed.s SIMPLE [(over10k_n2)over10k_n2.FieldSchema(name:s, type:string, comment:null), ] -POSTHOOK: Lineage: over10k_orc_bucketed.si SIMPLE [(over10k_n2)over10k_n2.FieldSchema(name:si, type:smallint, comment:null), ] -POSTHOOK: Lineage: over10k_orc_bucketed.t SIMPLE [(over10k_n2)over10k_n2.FieldSchema(name:t, type:tinyint, comment:null), ] -POSTHOOK: Lineage: over10k_orc_bucketed.ts SIMPLE [(over10k_n2)over10k_n2.FieldSchema(name:ts, type:timestamp, comment:null), ] -Found 8 items --rw-rw-rw- 3 ### USER ### ### GROUP ### 8903 ### HDFS DATE ### hdfs://### HDFS PATH ### --rw-rw-rw- 3 ### USER ### ### GROUP ### 8903 ### HDFS DATE ### hdfs://### HDFS PATH ### --rw-rw-rw- 3 ### USER ### ### GROUP ### 7698 ### HDFS DATE ### hdfs://### HDFS PATH ### --rw-rw-rw- 3 ### USER ### ### GROUP ### 7698 ### HDFS DATE ### hdfs://### HDFS PATH ### --rw-rw-rw- 3 ### USER ### ### GROUP ### 7273 ### HDFS DATE ### hdfs://### HDFS PATH ### --rw-rw-rw- 3 ### USER ### ### GROUP ### 7273 ### HDFS DATE ### hdfs://### HDFS PATH ### --rw-rw-rw- 3 ### USER ### ### GROUP ### 7180 ### HDFS DATE ### hdfs://### HDFS PATH ### --rw-rw-rw- 3 ### USER ### ### GROUP ### 7180 ### HDFS DATE ### hdfs://### HDFS PATH ### -PREHOOK: query: select distinct 7 as seven, INPUT__FILE__NAME from over10k_orc_bucketed -PREHOOK: type: QUERY -PREHOOK: Input: default@over10k_orc_bucketed -PREHOOK: Output: hdfs://### HDFS PATH ### -POSTHOOK: query: select distinct 7 as seven, INPUT__FILE__NAME from over10k_orc_bucketed -POSTHOOK: type: QUERY -POSTHOOK: Input: default@over10k_orc_bucketed -POSTHOOK: Output: hdfs://### HDFS PATH ### -7 hdfs://### HDFS PATH ### -7 hdfs://### HDFS PATH ### -7 hdfs://### HDFS PATH ### -7 hdfs://### HDFS PATH ### -7 hdfs://### HDFS PATH ### -7 hdfs://### HDFS PATH ### -7 hdfs://### HDFS PATH ### -7 hdfs://### HDFS PATH ### -PREHOOK: query: alter table over10k_orc_bucketed set TBLPROPERTIES ('transactional'='true') -PREHOOK: type: ALTERTABLE_PROPERTIES -PREHOOK: Input: default@over10k_orc_bucketed -PREHOOK: Output: default@over10k_orc_bucketed -POSTHOOK: query: alter table over10k_orc_bucketed set TBLPROPERTIES ('transactional'='true') -POSTHOOK: type: ALTERTABLE_PROPERTIES -POSTHOOK: Input: default@over10k_orc_bucketed -POSTHOOK: Output: default@over10k_orc_bucketed -PREHOOK: query: explain select t, si, i from over10k_orc_bucketed where b = 4294967363 and t < 100 order by t, si, i -PREHOOK: type: QUERY -POSTHOOK: query: explain select t, si, i from over10k_orc_bucketed where b = 4294967363 and t < 100 order by t, si, i -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-1 - Tez -#### A masked pattern was here #### - Edges: - Reducer 2 <- Map 1 (SIMPLE_EDGE) -#### A masked pattern was here #### - Vertices: - Map 1 - Map Operator Tree: - TableScan - alias: over10k_orc_bucketed - filterExpr: ((b = 4294967363L) and (t < 100Y)) (type: boolean) - Statistics: Num rows: 2098 Data size: 41920 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator - predicate: ((b = 4294967363L) and (t < 100Y)) (type: boolean) - Statistics: Num rows: 2 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: t (type: tinyint), si (type: smallint), i (type: int) - outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 2 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 (type: tinyint), _col1 (type: smallint), _col2 
(type: int) - sort order: +++ - Statistics: Num rows: 2 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE - Execution mode: vectorized - Reducer 2 - Execution mode: vectorized - Reduce Operator Tree: - Select Operator - expressions: KEY.reducesinkkey0 (type: tinyint), KEY.reducesinkkey1 (type: smallint), KEY.reducesinkkey2 (type: int) - outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 2 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: Num rows: 2 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - ListSink - -PREHOOK: query: select t, si, i from over10k_orc_bucketed where b = 4294967363 and t < 100 order by t, si, i -PREHOOK: type: QUERY -PREHOOK: Input: default@over10k_orc_bucketed -PREHOOK: Output: hdfs://### HDFS PATH ### -POSTHOOK: query: select t, si, i from over10k_orc_bucketed where b = 4294967363 and t < 100 order by t, si, i -POSTHOOK: type: QUERY -POSTHOOK: Input: default@over10k_orc_bucketed -POSTHOOK: Output: hdfs://### HDFS PATH ### --3 344 65733 --3 344 65733 -5 501 65585 -5 501 65585 -35 463 65646 -35 463 65646 -PREHOOK: query: explain select ROW__ID, t, si, i from over10k_orc_bucketed where b = 4294967363 and t < 100 order by ROW__ID -PREHOOK: type: QUERY -POSTHOOK: query: explain select ROW__ID, t, si, i from over10k_orc_bucketed where b = 4294967363 and t < 100 order by ROW__ID -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-1 - Tez -#### A masked pattern was here #### - Edges: - Reducer 2 <- Map 1 (SIMPLE_EDGE) -#### A masked pattern was here #### - Vertices: - Map 1 - Map Operator Tree: - TableScan - alias: over10k_orc_bucketed - filterExpr: ((b = 4294967363L) and (t < 100Y)) (type: boolean) - Statistics: Num rows: 2098 Data size: 41920 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator - predicate: ((b = 4294967363L) and (t < 100Y)) (type: boolean) - Statistics: Num rows: 2 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: ROW__ID (type: struct), t (type: tinyint), si (type: smallint), i (type: int) - outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 2 Data size: 176 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 (type: struct) - sort order: + - Statistics: Num rows: 2 Data size: 176 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col1 (type: tinyint), _col2 (type: smallint), _col3 (type: int) - Execution mode: vectorized - Reducer 2 - Execution mode: vectorized - Reduce Operator Tree: - Select Operator - expressions: KEY.reducesinkkey0 (type: struct), VALUE._col0 (type: tinyint), VALUE._col1 (type: smallint), VALUE._col2 (type: int) - outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 2 Data size: 176 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: Num rows: 2 Data size: 176 Basic stats: COMPLETE Column stats: COMPLETE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - ListSink - -PREHOOK: query: select ROW__ID, t, si, i from over10k_orc_bucketed where b = 4294967363 and t < 100 order by ROW__ID -PREHOOK: type: QUERY -PREHOOK: Input: default@over10k_orc_bucketed -PREHOOK: Output: hdfs://### HDFS PATH ### -POSTHOOK: query: select ROW__ID, t, si, i from over10k_orc_bucketed where b = 4294967363 and t < 100 order by ROW__ID -POSTHOOK: type: QUERY -POSTHOOK: Input: default@over10k_orc_bucketed -POSTHOOK: Output: hdfs://### HDFS PATH ### -{"writeid":0,"bucketid":536870912,"rowid":104} 5 501 65585 -{"writeid":0,"bucketid":536870912,"rowid":420} 5 501 65585 -{"writeid":0,"bucketid":536936448,"rowid":37} -3 344 65733 -{"writeid":0,"bucketid":536936448,"rowid":295} -3 344 65733 -{"writeid":0,"bucketid":537067520,"rowid":173} 35 463 65646 -{"writeid":0,"bucketid":537067520,"rowid":406} 35 463 65646 -PREHOOK: query: explain update over10k_orc_bucketed set i = 0 where b = 4294967363 and t < 100 -PREHOOK: type: QUERY -POSTHOOK: query: explain update over10k_orc_bucketed set i = 0 where b = 4294967363 and t < 100 -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-2 depends on stages: Stage-1 - Stage-0 depends on stages: Stage-2 - Stage-3 depends on stages: Stage-0 - -STAGE PLANS: - Stage: Stage-1 - Tez -#### A masked pattern was here #### - Edges: - Reducer 2 <- Map 1 (SIMPLE_EDGE) -#### A masked pattern was here #### - Vertices: - Map 1 - Map Operator Tree: - TableScan - alias: over10k_orc_bucketed - filterExpr: ((b = 4294967363L) and (t < 100Y)) (type: boolean) - Statistics: Num rows: 2098 Data size: 706986 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator - predicate: ((b = 4294967363L) and (t < 100Y)) (type: boolean) - Statistics: Num rows: 2 Data size: 674 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: ROW__ID (type: struct), t (type: tinyint), si (type: smallint), f (type: float), d (type: double), bo (type: boolean), s (type: string), ts (type: timestamp), dec (type: decimal(4,2)), bin (type: binary) - outputColumnNames: _col0, _col1, _col2, _col5, _col6, _col7, _col8, _col9, _col10, _col11 - Statistics: Num rows: 2 Data size: 834 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 (type: struct) - sort order: + - Map-reduce partition columns: UDFToInteger(_col0) (type: int) - Statistics: Num rows: 2 Data size: 834 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col1 (type: tinyint), _col2 (type: smallint), _col5 (type: float), _col6 (type: double), _col7 (type: boolean), _col8 (type: string), _col9 (type: timestamp), _col10 (type: decimal(4,2)), _col11 (type: binary) - Execution mode: vectorized - Reducer 2 - Execution mode: vectorized - Reduce Operator Tree: - Select Operator - expressions: KEY.reducesinkkey0 (type: struct), VALUE._col0 (type: tinyint), VALUE._col1 (type: smallint), 0 (type: int), 4294967363L (type: bigint), VALUE._col3 (type: float), VALUE._col4 (type: double), VALUE._col5 (type: boolean), VALUE._col6 (type: string), VALUE._col7 (type: timestamp), VALUE._col8 (type: decimal(4,2)), VALUE._col9 (type: binary) - outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11 - Statistics: Num rows: 2 Data size: 834 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: Num rows: 2 Data size: 834 Basic stats: 
COMPLETE Column stats: COMPLETE - table: - input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat - output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat - serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.over10k_orc_bucketed - Write Type: UPDATE - - Stage: Stage-2 - Dependency Collection - - Stage: Stage-0 - Move Operator - tables: - replace: false - table: - input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat - output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat - serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.over10k_orc_bucketed - Write Type: UPDATE - - Stage: Stage-3 - Stats Work - Basic Stats Work: - -PREHOOK: query: update over10k_orc_bucketed set i = 0 where b = 4294967363 and t < 100 -PREHOOK: type: QUERY -PREHOOK: Input: default@over10k_orc_bucketed -PREHOOK: Output: default@over10k_orc_bucketed -POSTHOOK: query: update over10k_orc_bucketed set i = 0 where b = 4294967363 and t < 100 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@over10k_orc_bucketed -POSTHOOK: Output: default@over10k_orc_bucketed -PREHOOK: query: select ROW__ID, t, si, i from over10k_orc_bucketed where b = 4294967363 and t < 100 order by ROW__ID -PREHOOK: type: QUERY -PREHOOK: Input: default@over10k_orc_bucketed -PREHOOK: Output: hdfs://### HDFS PATH ### -POSTHOOK: query: select ROW__ID, t, si, i from over10k_orc_bucketed where b = 4294967363 and t < 100 order by ROW__ID -POSTHOOK: type: QUERY -POSTHOOK: Input: default@over10k_orc_bucketed -POSTHOOK: Output: hdfs://### HDFS PATH ### -{"writeid":### Masked writeid ###,"bucketid":536870912,"rowid":0} 5 501 0 -{"writeid":### Masked writeid ###,"bucketid":536870912,"rowid":1} 5 501 0 -{"writeid":### Masked writeid ###,"bucketid":536936448,"rowid":0} -3 344 0 -{"writeid":### Masked writeid ###,"bucketid":536936448,"rowid":1} -3 344 0 -{"writeid":### Masked writeid ###,"bucketid":537067520,"rowid":0} 35 463 0 -{"writeid":### Masked writeid ###,"bucketid":537067520,"rowid":1} 35 463 0 -PREHOOK: query: explain select ROW__ID, count(*) from over10k_orc_bucketed group by ROW__ID having count(*) > 1 -PREHOOK: type: QUERY -POSTHOOK: query: explain select ROW__ID, count(*) from over10k_orc_bucketed group by ROW__ID having count(*) > 1 -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-1 - Tez -#### A masked pattern was here #### - Edges: - Reducer 2 <- Map 1 (SIMPLE_EDGE) -#### A masked pattern was here #### - Vertices: - Map 1 - Map Operator Tree: - TableScan - alias: over10k_orc_bucketed - Statistics: Num rows: 1234 Data size: 706090 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: ROW__ID (type: struct) - outputColumnNames: ROW__ID - Statistics: Num rows: 1234 Data size: 706090 Basic stats: COMPLETE Column stats: COMPLETE - Group By Operator - aggregations: count() - keys: ROW__ID (type: struct) - mode: hash - outputColumnNames: _col0, _col1 - Statistics: Num rows: 617 Data size: 51828 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 (type: struct) - sort order: + - Map-reduce partition columns: _col0 (type: struct) - Statistics: Num rows: 617 Data size: 51828 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col1 (type: bigint) - Reducer 2 - Reduce Operator Tree: - Group By Operator - aggregations: count(VALUE._col0) - keys: KEY._col0 (type: struct) - mode: mergepartial - outputColumnNames: _col0, _col1 - Statistics: Num 
rows: 617 Data size: 51828 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator - predicate: (_col1 > 1L) (type: boolean) - Statistics: Num rows: 205 Data size: 17220 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: Num rows: 205 Data size: 17220 Basic stats: COMPLETE Column stats: COMPLETE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - ListSink - -PREHOOK: query: select ROW__ID, count(*) from over10k_orc_bucketed group by ROW__ID having count(*) > 1 -PREHOOK: type: QUERY -PREHOOK: Input: default@over10k_orc_bucketed -PREHOOK: Output: hdfs://### HDFS PATH ### -POSTHOOK: query: select ROW__ID, count(*) from over10k_orc_bucketed group by ROW__ID having count(*) > 1 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@over10k_orc_bucketed -POSTHOOK: Output: hdfs://### HDFS PATH ### -PREHOOK: query: select ROW__ID, * from over10k_orc_bucketed where ROW__ID is null -PREHOOK: type: QUERY -PREHOOK: Input: default@over10k_orc_bucketed -PREHOOK: Output: hdfs://### HDFS PATH ### -POSTHOOK: query: select ROW__ID, * from over10k_orc_bucketed where ROW__ID is null -POSTHOOK: type: QUERY -POSTHOOK: Input: default@over10k_orc_bucketed -POSTHOOK: Output: hdfs://### HDFS PATH ### diff --git ql/src/test/results/clientpositive/tez/merge_test_dummy_operator.q.out ql/src/test/results/clientpositive/tez/merge_test_dummy_operator.q.out deleted file mode 100644 index 22c64a16de..0000000000 --- ql/src/test/results/clientpositive/tez/merge_test_dummy_operator.q.out +++ /dev/null @@ -1,419 +0,0 @@ -PREHOOK: query: create table common_join_table (id string, - col1 string, - date_created date, - col2 string, - col3 string, - time_stamp timestamp, - col4 date, - col4key bigint, - col5 date, - col6 string, - col7 string, - col8 smallint) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@common_join_table -POSTHOOK: query: create table common_join_table (id string, - col1 string, - date_created date, - col2 string, - col3 string, - time_stamp timestamp, - col4 date, - col4key bigint, - col5 date, - col6 string, - col7 string, - col8 smallint) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@common_join_table -PREHOOK: query: insert into common_join_table values ('id', '109515', null, 'test', 'test', '2018-01-10 15:03:55.0', '2018-01-10', 109515, null, '45045501', 'id', null) -PREHOOK: type: QUERY -PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@common_join_table -POSTHOOK: query: insert into common_join_table values ('id', '109515', null, 'test', 'test', '2018-01-10 15:03:55.0', '2018-01-10', 109515, null, '45045501', 'id', null) -POSTHOOK: type: QUERY -POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@common_join_table -POSTHOOK: Lineage: common_join_table.col1 SCRIPT [] -POSTHOOK: Lineage: common_join_table.col2 SCRIPT [] -POSTHOOK: Lineage: common_join_table.col3 SCRIPT [] -POSTHOOK: Lineage: common_join_table.col4 SCRIPT [] -POSTHOOK: Lineage: common_join_table.col4key SCRIPT [] -POSTHOOK: Lineage: common_join_table.col5 EXPRESSION [] -POSTHOOK: Lineage: common_join_table.col6 SCRIPT [] -POSTHOOK: Lineage: common_join_table.col7 SCRIPT [] -POSTHOOK: Lineage: common_join_table.col8 EXPRESSION [] -POSTHOOK: 
Lineage: common_join_table.date_created EXPRESSION [] -POSTHOOK: Lineage: common_join_table.id SCRIPT [] -POSTHOOK: Lineage: common_join_table.time_stamp SCRIPT [] -PREHOOK: query: insert into common_join_table values ('id', '109515', null, 'test', 'test', '2018-01-10 15:03:55.0', '2018-01-10', 109515, null, '45045501', 'id', null) -PREHOOK: type: QUERY -PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@common_join_table -POSTHOOK: query: insert into common_join_table values ('id', '109515', null, 'test', 'test', '2018-01-10 15:03:55.0', '2018-01-10', 109515, null, '45045501', 'id', null) -POSTHOOK: type: QUERY -POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@common_join_table -POSTHOOK: Lineage: common_join_table.col1 SCRIPT [] -POSTHOOK: Lineage: common_join_table.col2 SCRIPT [] -POSTHOOK: Lineage: common_join_table.col3 SCRIPT [] -POSTHOOK: Lineage: common_join_table.col4 SCRIPT [] -POSTHOOK: Lineage: common_join_table.col4key SCRIPT [] -POSTHOOK: Lineage: common_join_table.col5 EXPRESSION [] -POSTHOOK: Lineage: common_join_table.col6 SCRIPT [] -POSTHOOK: Lineage: common_join_table.col7 SCRIPT [] -POSTHOOK: Lineage: common_join_table.col8 EXPRESSION [] -POSTHOOK: Lineage: common_join_table.date_created EXPRESSION [] -POSTHOOK: Lineage: common_join_table.id SCRIPT [] -POSTHOOK: Lineage: common_join_table.time_stamp SCRIPT [] -PREHOOK: query: insert into common_join_table values ('id', '109515', null, 'test', 'test', '2018-01-10 15:03:55.0', '2018-01-10', 109515, null, '45045501', 'id', null) -PREHOOK: type: QUERY -PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@common_join_table -POSTHOOK: query: insert into common_join_table values ('id', '109515', null, 'test', 'test', '2018-01-10 15:03:55.0', '2018-01-10', 109515, null, '45045501', 'id', null) -POSTHOOK: type: QUERY -POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@common_join_table -POSTHOOK: Lineage: common_join_table.col1 SCRIPT [] -POSTHOOK: Lineage: common_join_table.col2 SCRIPT [] -POSTHOOK: Lineage: common_join_table.col3 SCRIPT [] -POSTHOOK: Lineage: common_join_table.col4 SCRIPT [] -POSTHOOK: Lineage: common_join_table.col4key SCRIPT [] -POSTHOOK: Lineage: common_join_table.col5 EXPRESSION [] -POSTHOOK: Lineage: common_join_table.col6 SCRIPT [] -POSTHOOK: Lineage: common_join_table.col7 SCRIPT [] -POSTHOOK: Lineage: common_join_table.col8 EXPRESSION [] -POSTHOOK: Lineage: common_join_table.date_created EXPRESSION [] -POSTHOOK: Lineage: common_join_table.id SCRIPT [] -POSTHOOK: Lineage: common_join_table.time_stamp SCRIPT [] -PREHOOK: query: insert into common_join_table values ('id', '109515', null, 'test', 'test', '2018-01-10 15:03:55.0', '2018-01-10', 109515, null, '45045501', 'id', null) -PREHOOK: type: QUERY -PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@common_join_table -POSTHOOK: query: insert into common_join_table values ('id', '109515', null, 'test', 'test', '2018-01-10 15:03:55.0', '2018-01-10', 109515, null, '45045501', 'id', null) -POSTHOOK: type: QUERY -POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@common_join_table -POSTHOOK: Lineage: common_join_table.col1 SCRIPT [] -POSTHOOK: Lineage: common_join_table.col2 SCRIPT [] -POSTHOOK: Lineage: common_join_table.col3 SCRIPT [] -POSTHOOK: Lineage: common_join_table.col4 SCRIPT [] -POSTHOOK: Lineage: common_join_table.col4key SCRIPT [] -POSTHOOK: Lineage: common_join_table.col5 EXPRESSION [] -POSTHOOK: Lineage: 
common_join_table.col6 SCRIPT [] -POSTHOOK: Lineage: common_join_table.col7 SCRIPT [] -POSTHOOK: Lineage: common_join_table.col8 EXPRESSION [] -POSTHOOK: Lineage: common_join_table.date_created EXPRESSION [] -POSTHOOK: Lineage: common_join_table.id SCRIPT [] -POSTHOOK: Lineage: common_join_table.time_stamp SCRIPT [] -PREHOOK: query: insert into common_join_table values ('id', '109515', null, 'test', 'test', '2018-01-10 15:03:55.0', '2018-01-10', 109515, null, '45045501', 'id', null) -PREHOOK: type: QUERY -PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@common_join_table -POSTHOOK: query: insert into common_join_table values ('id', '109515', null, 'test', 'test', '2018-01-10 15:03:55.0', '2018-01-10', 109515, null, '45045501', 'id', null) -POSTHOOK: type: QUERY -POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@common_join_table -POSTHOOK: Lineage: common_join_table.col1 SCRIPT [] -POSTHOOK: Lineage: common_join_table.col2 SCRIPT [] -POSTHOOK: Lineage: common_join_table.col3 SCRIPT [] -POSTHOOK: Lineage: common_join_table.col4 SCRIPT [] -POSTHOOK: Lineage: common_join_table.col4key SCRIPT [] -POSTHOOK: Lineage: common_join_table.col5 EXPRESSION [] -POSTHOOK: Lineage: common_join_table.col6 SCRIPT [] -POSTHOOK: Lineage: common_join_table.col7 SCRIPT [] -POSTHOOK: Lineage: common_join_table.col8 EXPRESSION [] -POSTHOOK: Lineage: common_join_table.date_created EXPRESSION [] -POSTHOOK: Lineage: common_join_table.id SCRIPT [] -POSTHOOK: Lineage: common_join_table.time_stamp SCRIPT [] -PREHOOK: query: insert into common_join_table values ('id', '109515', null, 'test', 'test', '2018-01-10 15:03:55.0', '2018-01-10', 109515, null, '45045501', 'id', null) -PREHOOK: type: QUERY -PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@common_join_table -POSTHOOK: query: insert into common_join_table values ('id', '109515', null, 'test', 'test', '2018-01-10 15:03:55.0', '2018-01-10', 109515, null, '45045501', 'id', null) -POSTHOOK: type: QUERY -POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@common_join_table -POSTHOOK: Lineage: common_join_table.col1 SCRIPT [] -POSTHOOK: Lineage: common_join_table.col2 SCRIPT [] -POSTHOOK: Lineage: common_join_table.col3 SCRIPT [] -POSTHOOK: Lineage: common_join_table.col4 SCRIPT [] -POSTHOOK: Lineage: common_join_table.col4key SCRIPT [] -POSTHOOK: Lineage: common_join_table.col5 EXPRESSION [] -POSTHOOK: Lineage: common_join_table.col6 SCRIPT [] -POSTHOOK: Lineage: common_join_table.col7 SCRIPT [] -POSTHOOK: Lineage: common_join_table.col8 EXPRESSION [] -POSTHOOK: Lineage: common_join_table.date_created EXPRESSION [] -POSTHOOK: Lineage: common_join_table.id SCRIPT [] -POSTHOOK: Lineage: common_join_table.time_stamp SCRIPT [] -PREHOOK: query: insert into common_join_table values ('id', '109515', null, 'test', 'test', '2018-01-10 15:03:55.0', '2018-01-10', 109515, null, '45045501', 'id', null) -PREHOOK: type: QUERY -PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@common_join_table -POSTHOOK: query: insert into common_join_table values ('id', '109515', null, 'test', 'test', '2018-01-10 15:03:55.0', '2018-01-10', 109515, null, '45045501', 'id', null) -POSTHOOK: type: QUERY -POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@common_join_table -POSTHOOK: Lineage: common_join_table.col1 SCRIPT [] -POSTHOOK: Lineage: common_join_table.col2 SCRIPT [] -POSTHOOK: Lineage: common_join_table.col3 SCRIPT [] -POSTHOOK: Lineage: common_join_table.col4 
SCRIPT [] -POSTHOOK: Lineage: common_join_table.col4key SCRIPT [] -POSTHOOK: Lineage: common_join_table.col5 EXPRESSION [] -POSTHOOK: Lineage: common_join_table.col6 SCRIPT [] -POSTHOOK: Lineage: common_join_table.col7 SCRIPT [] -POSTHOOK: Lineage: common_join_table.col8 EXPRESSION [] -POSTHOOK: Lineage: common_join_table.date_created EXPRESSION [] -POSTHOOK: Lineage: common_join_table.id SCRIPT [] -POSTHOOK: Lineage: common_join_table.time_stamp SCRIPT [] -PREHOOK: query: insert into common_join_table values ('id', '109515', null, 'test', 'test', '2018-01-10 15:03:55.0', '2018-01-10', 109515, null, '45045501', 'id', null) -PREHOOK: type: QUERY -PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@common_join_table -POSTHOOK: query: insert into common_join_table values ('id', '109515', null, 'test', 'test', '2018-01-10 15:03:55.0', '2018-01-10', 109515, null, '45045501', 'id', null) -POSTHOOK: type: QUERY -POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@common_join_table -POSTHOOK: Lineage: common_join_table.col1 SCRIPT [] -POSTHOOK: Lineage: common_join_table.col2 SCRIPT [] -POSTHOOK: Lineage: common_join_table.col3 SCRIPT [] -POSTHOOK: Lineage: common_join_table.col4 SCRIPT [] -POSTHOOK: Lineage: common_join_table.col4key SCRIPT [] -POSTHOOK: Lineage: common_join_table.col5 EXPRESSION [] -POSTHOOK: Lineage: common_join_table.col6 SCRIPT [] -POSTHOOK: Lineage: common_join_table.col7 SCRIPT [] -POSTHOOK: Lineage: common_join_table.col8 EXPRESSION [] -POSTHOOK: Lineage: common_join_table.date_created EXPRESSION [] -POSTHOOK: Lineage: common_join_table.id SCRIPT [] -POSTHOOK: Lineage: common_join_table.time_stamp SCRIPT [] -PREHOOK: query: insert into common_join_table values ('id', '109515', null, 'test', 'test', '2018-01-10 15:03:55.0', '2018-01-10', 109515, null, '45045501', 'id', null) -PREHOOK: type: QUERY -PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@common_join_table -POSTHOOK: query: insert into common_join_table values ('id', '109515', null, 'test', 'test', '2018-01-10 15:03:55.0', '2018-01-10', 109515, null, '45045501', 'id', null) -POSTHOOK: type: QUERY -POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@common_join_table -POSTHOOK: Lineage: common_join_table.col1 SCRIPT [] -POSTHOOK: Lineage: common_join_table.col2 SCRIPT [] -POSTHOOK: Lineage: common_join_table.col3 SCRIPT [] -POSTHOOK: Lineage: common_join_table.col4 SCRIPT [] -POSTHOOK: Lineage: common_join_table.col4key SCRIPT [] -POSTHOOK: Lineage: common_join_table.col5 EXPRESSION [] -POSTHOOK: Lineage: common_join_table.col6 SCRIPT [] -POSTHOOK: Lineage: common_join_table.col7 SCRIPT [] -POSTHOOK: Lineage: common_join_table.col8 EXPRESSION [] -POSTHOOK: Lineage: common_join_table.date_created EXPRESSION [] -POSTHOOK: Lineage: common_join_table.id SCRIPT [] -POSTHOOK: Lineage: common_join_table.time_stamp SCRIPT [] -PREHOOK: query: insert into common_join_table values ('id', '109515', null, 'test', 'test', '2018-01-10 15:03:55.0', '2018-01-10', 109515, null, '45045501', 'id', null) -PREHOOK: type: QUERY -PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@common_join_table -POSTHOOK: query: insert into common_join_table values ('id', '109515', null, 'test', 'test', '2018-01-10 15:03:55.0', '2018-01-10', 109515, null, '45045501', 'id', null) -POSTHOOK: type: QUERY -POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@common_join_table -POSTHOOK: Lineage: common_join_table.col1 SCRIPT [] -POSTHOOK: 
Lineage: common_join_table.col2 SCRIPT [] -POSTHOOK: Lineage: common_join_table.col3 SCRIPT [] -POSTHOOK: Lineage: common_join_table.col4 SCRIPT [] -POSTHOOK: Lineage: common_join_table.col4key SCRIPT [] -POSTHOOK: Lineage: common_join_table.col5 EXPRESSION [] -POSTHOOK: Lineage: common_join_table.col6 SCRIPT [] -POSTHOOK: Lineage: common_join_table.col7 SCRIPT [] -POSTHOOK: Lineage: common_join_table.col8 EXPRESSION [] -POSTHOOK: Lineage: common_join_table.date_created EXPRESSION [] -POSTHOOK: Lineage: common_join_table.id SCRIPT [] -POSTHOOK: Lineage: common_join_table.time_stamp SCRIPT [] -PREHOOK: query: WITH temp_tbl_1 AS ( -SELECT col7 - ,col4KEY - ,COUNT(*) AS temp_result_1 - FROM common_join_table - GROUP BY col7, col4KEY -), - -temp_tbl_2 AS ( -SELECT col7 - ,col4KEY - ,temp_result_1 - ,ROW_NUMBER() OVER(PARTITION BY col7 ORDER BY col4KEY ASC) AS temp_result_2 - FROM temp_tbl_1 -), - -temp_tbl_3 AS ( -SELECT col7 - ,MIN(col4KEY) AS START_DATE - ,MAX(col4KEY) AS END_DATE - FROM temp_tbl_2 - GROUP BY col7 -), - - -temp_tbl_4 AS ( -SELECT D1.col7 - ,D1.col4KEY - ,D1.temp_result_2 - ,D1.temp_result_1 - ,CASE WHEN D2.col4KEY-D1.col4KEY > 30 THEN D1.col4KEY - WHEN D1.col4KEY = M.END_DATE THEN D1.col4KEY ELSE 0 END AS temp_result_3 - ,CASE WHEN D2.col4KEY-D1.col4KEY > 30 THEN D2.col4KEY - WHEN D1.col4KEY = M.START_DATE THEN D1.col4KEY ELSE 0 END AS temp_result_4 - FROM temp_tbl_2 D1 - INNER JOIN temp_tbl_3 M - ON D1.col7 = M.col7 - LEFT JOIN temp_tbl_2 D2 - ON D1.col7 = D2.col7 - AND D1.temp_result_2 = D2.temp_result_2+1 -), - -temp_tbl_5 AS ( -SELECT S1.col7 - ,S1.col4KEY - ,S1.temp_result_2 - ,S1.temp_result_1 - ,CASE WHEN S1.col4KEY >= S2.temp_result_4 - AND S1.col4KEY <= S3.temp_result_3 - THEN 1 ELSE 0 END AS temp_result_5 - FROM temp_tbl_4 S1 - LEFT JOIN temp_tbl_4 S2 - ON S1.col7 = S2.col7 - AND S2.temp_result_4 != 0 - LEFT JOIN temp_tbl_4 S3 - ON S1.col7 = S3.col7 - AND S3.temp_result_3 != 0 -), - -temp_tbl_6 AS ( -SELECT col7 - ,col4KEY - ,temp_result_2 - ,temp_result_1 - ,SUM(temp_result_5) AS temp_result_5 - FROM temp_tbl_5 - GROUP BY col7 - ,col4KEY - ,temp_result_2 - ,temp_result_1 -), - -temp_tbl_7 AS ( -SELECT col7 - ,SUM(temp_result_2) AS temp_result_6 - ,SUM(temp_result_1) AS temp_result_1 - FROM temp_tbl_6 - GROUP BY col7 -) - -SELECT S.* - FROM temp_tbl_6 S - INNER JOIN - temp_tbl_7 F - ON S.col7 = F.col7 - --WHERE F.temp_result_6 < 40 - --AND F.temp_result_1 < 200 -PREHOOK: type: QUERY -PREHOOK: Input: default@common_join_table -PREHOOK: Output: hdfs://### HDFS PATH ### -POSTHOOK: query: WITH temp_tbl_1 AS ( -SELECT col7 - ,col4KEY - ,COUNT(*) AS temp_result_1 - FROM common_join_table - GROUP BY col7, col4KEY -), - -temp_tbl_2 AS ( -SELECT col7 - ,col4KEY - ,temp_result_1 - ,ROW_NUMBER() OVER(PARTITION BY col7 ORDER BY col4KEY ASC) AS temp_result_2 - FROM temp_tbl_1 -), - -temp_tbl_3 AS ( -SELECT col7 - ,MIN(col4KEY) AS START_DATE - ,MAX(col4KEY) AS END_DATE - FROM temp_tbl_2 - GROUP BY col7 -), - - -temp_tbl_4 AS ( -SELECT D1.col7 - ,D1.col4KEY - ,D1.temp_result_2 - ,D1.temp_result_1 - ,CASE WHEN D2.col4KEY-D1.col4KEY > 30 THEN D1.col4KEY - WHEN D1.col4KEY = M.END_DATE THEN D1.col4KEY ELSE 0 END AS temp_result_3 - ,CASE WHEN D2.col4KEY-D1.col4KEY > 30 THEN D2.col4KEY - WHEN D1.col4KEY = M.START_DATE THEN D1.col4KEY ELSE 0 END AS temp_result_4 - FROM temp_tbl_2 D1 - INNER JOIN temp_tbl_3 M - ON D1.col7 = M.col7 - LEFT JOIN temp_tbl_2 D2 - ON D1.col7 = D2.col7 - AND D1.temp_result_2 = D2.temp_result_2+1 -), - -temp_tbl_5 AS ( -SELECT S1.col7 - ,S1.col4KEY - 
,S1.temp_result_2 - ,S1.temp_result_1 - ,CASE WHEN S1.col4KEY >= S2.temp_result_4 - AND S1.col4KEY <= S3.temp_result_3 - THEN 1 ELSE 0 END AS temp_result_5 - FROM temp_tbl_4 S1 - LEFT JOIN temp_tbl_4 S2 - ON S1.col7 = S2.col7 - AND S2.temp_result_4 != 0 - LEFT JOIN temp_tbl_4 S3 - ON S1.col7 = S3.col7 - AND S3.temp_result_3 != 0 -), - -temp_tbl_6 AS ( -SELECT col7 - ,col4KEY - ,temp_result_2 - ,temp_result_1 - ,SUM(temp_result_5) AS temp_result_5 - FROM temp_tbl_5 - GROUP BY col7 - ,col4KEY - ,temp_result_2 - ,temp_result_1 -), - -temp_tbl_7 AS ( -SELECT col7 - ,SUM(temp_result_2) AS temp_result_6 - ,SUM(temp_result_1) AS temp_result_1 - FROM temp_tbl_6 - GROUP BY col7 -) - -SELECT S.* - FROM temp_tbl_6 S - INNER JOIN - temp_tbl_7 F - ON S.col7 = F.col7 - --WHERE F.temp_result_6 < 40 - --AND F.temp_result_1 < 200 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@common_join_table -POSTHOOK: Output: hdfs://### HDFS PATH ### -id 109515 1 10 1 -PREHOOK: query: drop table common_join_table -PREHOOK: type: DROPTABLE -PREHOOK: Input: default@common_join_table -PREHOOK: Output: default@common_join_table -POSTHOOK: query: drop table common_join_table -POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@common_join_table -POSTHOOK: Output: default@common_join_table diff --git ql/src/test/results/clientpositive/tez/vector_delete_orig_table.q.out ql/src/test/results/clientpositive/tez/vector_delete_orig_table.q.out deleted file mode 100644 index d1d8cc05e5..0000000000 --- ql/src/test/results/clientpositive/tez/vector_delete_orig_table.q.out +++ /dev/null @@ -1,180 +0,0 @@ -PREHOOK: query: create table acid_dot( - ctinyint TINYINT, - csmallint SMALLINT, - cint INT, - cbigint BIGINT, - cfloat FLOAT, - cdouble DOUBLE, - cstring1 STRING, - cstring2 STRING, - ctimestamp1 TIMESTAMP, - ctimestamp2 TIMESTAMP, - cboolean1 BOOLEAN, -#### A masked pattern was here #### -PREHOOK: type: CREATETABLE -PREHOOK: Input: hdfs://### HDFS PATH ### -PREHOOK: Output: database:default -PREHOOK: Output: default@acid_dot -POSTHOOK: query: create table acid_dot( - ctinyint TINYINT, - csmallint SMALLINT, - cint INT, - cbigint BIGINT, - cfloat FLOAT, - cdouble DOUBLE, - cstring1 STRING, - cstring2 STRING, - ctimestamp1 TIMESTAMP, - ctimestamp2 TIMESTAMP, - cboolean1 BOOLEAN, -#### A masked pattern was here #### -POSTHOOK: type: CREATETABLE -POSTHOOK: Input: hdfs://### HDFS PATH ### -POSTHOOK: Output: database:default -POSTHOOK: Output: default@acid_dot -PREHOOK: query: explain vectorization detail -select count(*) from acid_dot -PREHOOK: type: QUERY -POSTHOOK: query: explain vectorization detail -select count(*) from acid_dot -POSTHOOK: type: QUERY -PLAN VECTORIZATION: - enabled: true - enabledConditionsMet: [hive.vectorized.execution.enabled IS true] - -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-1 - Tez -#### A masked pattern was here #### - Edges: - Reducer 2 <- Map 1 (CUSTOM_SIMPLE_EDGE) -#### A masked pattern was here #### - Vertices: - Map 1 - Map Operator Tree: - TableScan - alias: acid_dot - Statistics: Num rows: 5865 Data size: 2956160 Basic stats: COMPLETE Column stats: COMPLETE - TableScan Vectorization: - native: true - vectorizationSchemaColumns: [0:ctinyint:tinyint, 1:csmallint:smallint, 2:cint:int, 3:cbigint:bigint, 4:cfloat:float, 5:cdouble:double, 6:cstring1:string, 7:cstring2:string, 8:ctimestamp1:timestamp, 9:ctimestamp2:timestamp, 10:cboolean1:boolean, 11:cboolean2:boolean, 12:ROW__ID:struct] - Select Operator - Select Vectorization: - 
className: VectorSelectOperator - native: true - projectedOutputColumnNums: [] - Statistics: Num rows: 5865 Data size: 2956160 Basic stats: COMPLETE Column stats: COMPLETE - Group By Operator - aggregations: count() - Group By Vectorization: - aggregators: VectorUDAFCountStar(*) -> bigint - className: VectorGroupByOperator - groupByMode: HASH - native: false - vectorProcessingMode: HASH - projectedOutputColumnNums: [0] - mode: hash - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - sort order: - Reduce Sink Vectorization: - className: VectorReduceSinkEmptyKeyOperator - keyColumnNums: [] - native: true - nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true - valueColumnNums: [0] - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col0 (type: bigint) - Execution mode: vectorized - Map Vectorization: - enabled: true - enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true - inputFormatFeatureSupport: [] - featureSupportInUse: [] - inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat - allNative: false - usesVectorUDFAdaptor: false - vectorized: true - rowBatchContext: - dataColumnCount: 12 - includeColumns: [] - dataColumns: ctinyint:tinyint, csmallint:smallint, cint:int, cbigint:bigint, cfloat:float, cdouble:double, cstring1:string, cstring2:string, ctimestamp1:timestamp, ctimestamp2:timestamp, cboolean1:boolean, cboolean2:boolean - partitionColumnCount: 0 - scratchColumnTypeNames: [] - Reducer 2 - Execution mode: vectorized - Reduce Vectorization: - enabled: true - enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true - reduceColumnNullOrder: - reduceColumnSortOrder: - allNative: false - usesVectorUDFAdaptor: false - vectorized: true - rowBatchContext: - dataColumnCount: 1 - dataColumns: VALUE._col0:bigint - partitionColumnCount: 0 - scratchColumnTypeNames: [] - Reduce Operator Tree: - Group By Operator - aggregations: count(VALUE._col0) - Group By Vectorization: - aggregators: VectorUDAFCountMerge(col 0:bigint) -> bigint - className: VectorGroupByOperator - groupByMode: MERGEPARTIAL - native: false - vectorProcessingMode: GLOBAL - projectedOutputColumnNums: [0] - mode: mergepartial - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - File Sink Vectorization: - className: VectorFileSinkOperator - native: false - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - ListSink - -PREHOOK: query: select count(*) from acid_dot -PREHOOK: type: QUERY -PREHOOK: Input: default@acid_dot -PREHOOK: Output: hdfs://### HDFS PATH ### -POSTHOOK: query: select count(*) from acid_dot -POSTHOOK: type: QUERY -POSTHOOK: Input: default@acid_dot -POSTHOOK: Output: hdfs://### HDFS PATH ### -12288 -PREHOOK: query: delete from acid_dot where cint < -1070551679 -PREHOOK: type: QUERY 
-PREHOOK: Input: default@acid_dot -PREHOOK: Output: default@acid_dot -POSTHOOK: query: delete from acid_dot where cint < -1070551679 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@acid_dot -POSTHOOK: Output: default@acid_dot -PREHOOK: query: select count(*) from acid_dot -PREHOOK: type: QUERY -PREHOOK: Input: default@acid_dot -PREHOOK: Output: hdfs://### HDFS PATH ### -POSTHOOK: query: select count(*) from acid_dot -POSTHOOK: type: QUERY -POSTHOOK: Input: default@acid_dot -POSTHOOK: Output: hdfs://### HDFS PATH ### -12192 -#### A masked pattern was here #### diff --git ql/src/test/results/clientpositive/udaf_context_ngrams.q.out ql/src/test/results/clientpositive/udaf_context_ngrams.q.out deleted file mode 100644 index 843039e95b..0000000000 --- ql/src/test/results/clientpositive/udaf_context_ngrams.q.out +++ /dev/null @@ -1,69 +0,0 @@ -PREHOOK: query: CREATE TABLE kafka_n0 (contents STRING) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@kafka_n0 -POSTHOOK: query: CREATE TABLE kafka_n0 (contents STRING) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@kafka_n0 -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/text-en.txt' INTO TABLE kafka_n0 -PREHOOK: type: LOAD -#### A masked pattern was here #### -PREHOOK: Output: default@kafka_n0 -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/text-en.txt' INTO TABLE kafka_n0 -POSTHOOK: type: LOAD -#### A masked pattern was here #### -POSTHOOK: Output: default@kafka_n0 -PREHOOK: query: SELECT context_ngrams(sentences(lower(contents)), array(null), 100, 1000).estfrequency FROM kafka_n0 -PREHOOK: type: QUERY -PREHOOK: Input: default@kafka_n0 -#### A masked pattern was here #### -POSTHOOK: query: SELECT context_ngrams(sentences(lower(contents)), array(null), 100, 1000).estfrequency FROM kafka_n0 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@kafka_n0 -#### A masked pattern was here #### -[267.0,171.0,164.0,119.0,108.0,106.0,106.0,82.0,79.0,67.0,67.0,46.0,45.0,42.0,42.0,40.0,39.0,37.0,37.0,36.0,34.0,32.0,32.0,30.0,30.0,29.0,28.0,28.0,28.0,28.0,26.0,25.0,24.0,23.0,23.0,22.0,22.0,21.0,20.0,19.0,18.0,18.0,18.0,17.0,17.0,17.0,16.0,16.0,16.0,16.0,15.0,15.0,14.0,14.0,14.0,13.0,13.0,13.0,13.0,13.0,13.0,13.0,12.0,12.0,12.0,12.0,12.0,12.0,12.0,11.0,11.0,11.0,11.0,11.0,11.0,11.0,10.0,10.0,10.0,10.0,10.0,10.0,10.0,10.0,10.0,10.0,10.0,9.0,9.0,9.0,9.0,9.0,9.0,9.0,9.0,9.0,9.0,9.0,9.0,8.0] -PREHOOK: query: SELECT context_ngrams(sentences(lower(contents)), array("he",null), 100, 1000) FROM kafka_n0 -PREHOOK: type: QUERY -PREHOOK: Input: default@kafka_n0 -#### A masked pattern was here #### -POSTHOOK: query: SELECT context_ngrams(sentences(lower(contents)), array("he",null), 100, 1000) FROM kafka_n0 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@kafka_n0 -#### A masked pattern was here #### 
-[{"ngram":["was"],"estfrequency":17.0},{"ngram":["had"],"estfrequency":16.0},{"ngram":["thought"],"estfrequency":13.0},{"ngram":["could"],"estfrequency":9.0},{"ngram":["would"],"estfrequency":7.0},{"ngram":["lay"],"estfrequency":5.0},{"ngram":["did"],"estfrequency":4.0},{"ngram":["felt"],"estfrequency":4.0},{"ngram":["looked"],"estfrequency":4.0},{"ngram":["s"],"estfrequency":4.0},{"ngram":["wanted"],"estfrequency":4.0},{"ngram":["finally"],"estfrequency":3.0},{"ngram":["lifted"],"estfrequency":3.0},{"ngram":["must"],"estfrequency":3.0},{"ngram":["needed"],"estfrequency":3.0},{"ngram":["slid"],"estfrequency":3.0},{"ngram":["told"],"estfrequency":3.0},{"ngram":["tried"],"estfrequency":3.0},{"ngram":["also"],"estfrequency":2.0},{"ngram":["always"],"estfrequency":2.0},{"ngram":["began"],"estfrequency":2.0},{"ngram":["didn't"],"estfrequency":2.0},{"ngram":["do"],"estfrequency":2.0},{"ngram":["drew"],"estfrequency":2.0},{"ngram":["found"],"estfrequency":2.0},{"ngram":["is"],"estfrequency":2.0},{"ngram":["let"],"estfrequency":2.0},{"ngram":["made"],"estfrequency":2.0},{"ngram":["really"],"estfrequency":2.0},{"ngram":["reported"],"estfrequency":2.0},{"ngram":["threw"],"estfrequency":2.0},{"ngram":["touched"],"estfrequency":2.0},{"ngram":["wouldn't"],"estfrequency":2.0},{"ngram":["allowed"],"estfrequency":1.0},{"ngram":["almost"],"estfrequency":1.0},{"ngram":["became"],"estfrequency":1.0},{"ngram":["called"],"estfrequency":1.0},{"ngram":["caught"],"estfrequency":1.0},{"ngram":["chose"],"estfrequency":1.0},{"ngram":["confined"],"estfrequency":1.0},{"ngram":["cut"],"estfrequency":1.0},{"ngram":["denied"],"estfrequency":1.0},{"ngram":["directed"],"estfrequency":1.0},{"ngram":["discovered"],"estfrequency":1.0},{"ngram":["failed"],"estfrequency":1.0},{"ngram":["have"],"estfrequency":1.0},{"ngram":["heard"],"estfrequency":1.0},{"ngram":["hit"],"estfrequency":1.0},{"ngram":["hoped"],"estfrequency":1.0},{"ngram":["intended"],"estfrequency":1.0},{"ngram":["maintained"],"estfrequency":1.0},{"ngram":["managed"],"estfrequency":1.0},{"ngram":["never"],"estfrequency":1.0},{"ngram":["preferred"],"estfrequency":1.0},{"ngram":["remembered"],"estfrequency":1.0},{"ngram":["retracted"],"estfrequency":1.0},{"ngram":["said"],"estfrequency":1.0},{"ngram":["sits"],"estfrequency":1.0},{"ngram":["slowly"],"estfrequency":1.0},{"ngram":["stood"],"estfrequency":1.0},{"ngram":["swung"],"estfrequency":1.0},{"ngram":["turned"],"estfrequency":1.0},{"ngram":["urged"],"estfrequency":1.0},{"ngram":["were"],"estfrequency":1.0},{"ngram":["will"],"estfrequency":1.0}] -PREHOOK: query: SELECT context_ngrams(sentences(lower(contents)), array(null,"salesmen"), 100, 1000) FROM kafka_n0 -PREHOOK: type: QUERY -PREHOOK: Input: default@kafka_n0 -#### A masked pattern was here #### -POSTHOOK: query: SELECT context_ngrams(sentences(lower(contents)), array(null,"salesmen"), 100, 1000) FROM kafka_n0 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@kafka_n0 -#### A masked pattern was here #### -[{"ngram":["travelling"],"estfrequency":3.0}] -PREHOOK: query: SELECT context_ngrams(sentences(lower(contents)), array("what","i",null), 100, 1000) FROM kafka_n0 -PREHOOK: type: QUERY -PREHOOK: Input: default@kafka_n0 -#### A masked pattern was here #### -POSTHOOK: query: SELECT context_ngrams(sentences(lower(contents)), array("what","i",null), 100, 1000) FROM kafka_n0 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@kafka_n0 -#### A masked pattern was here #### -[{"ngram":["think"],"estfrequency":3.0},{"ngram":["feel"],"estfrequency":2.0}] -PREHOOK: query: 
SELECT context_ngrams(sentences(lower(contents)), array(null,null), 100, 1000).estfrequency FROM kafka_n0 -PREHOOK: type: QUERY -PREHOOK: Input: default@kafka_n0 -#### A masked pattern was here #### -POSTHOOK: query: SELECT context_ngrams(sentences(lower(contents)), array(null,null), 100, 1000).estfrequency FROM kafka_n0 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@kafka_n0 -#### A masked pattern was here #### -[23.0,20.0,18.0,17.0,17.0,16.0,16.0,16.0,16.0,15.0,14.0,13.0,12.0,12.0,12.0,11.0,11.0,11.0,10.0,10.0,10.0,10.0,10.0,10.0,9.0,9.0,9.0,8.0,8.0,8.0,8.0,7.0,7.0,7.0,7.0,7.0,7.0,7.0,7.0,6.0,6.0,6.0,6.0,6.0,6.0,6.0,6.0,6.0,6.0,5.0,5.0,5.0,5.0,5.0,5.0,5.0,5.0,5.0,5.0,5.0,5.0,5.0,5.0,5.0,5.0,5.0,5.0,5.0,5.0,5.0,5.0,5.0,5.0,5.0,5.0,5.0,5.0,5.0,4.0,4.0,4.0,4.0,4.0,4.0,4.0,4.0,4.0,4.0,4.0,4.0,4.0,4.0,4.0,4.0,4.0,4.0,4.0,4.0,4.0,4.0] -PREHOOK: query: DROP TABLE kafka_n0 -PREHOOK: type: DROPTABLE -PREHOOK: Input: default@kafka_n0 -PREHOOK: Output: default@kafka_n0 -POSTHOOK: query: DROP TABLE kafka_n0 -POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@kafka_n0 -POSTHOOK: Output: default@kafka_n0 diff --git ql/src/test/results/clientpositive/udaf_corr.q.out ql/src/test/results/clientpositive/udaf_corr.q.out deleted file mode 100644 index 44809c5d23..0000000000 --- ql/src/test/results/clientpositive/udaf_corr.q.out +++ /dev/null @@ -1,108 +0,0 @@ -PREHOOK: query: DROP TABLE covar_tab_n0 -PREHOOK: type: DROPTABLE -POSTHOOK: query: DROP TABLE covar_tab_n0 -POSTHOOK: type: DROPTABLE -PREHOOK: query: CREATE TABLE covar_tab_n0 (a INT, b INT, c INT) -ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t' -STORED AS TEXTFILE -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@covar_tab_n0 -POSTHOOK: query: CREATE TABLE covar_tab_n0 (a INT, b INT, c INT) -ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t' -STORED AS TEXTFILE -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@covar_tab_n0 -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/covar_tab.txt' OVERWRITE -INTO TABLE covar_tab_n0 -PREHOOK: type: LOAD -#### A masked pattern was here #### -PREHOOK: Output: default@covar_tab_n0 -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/covar_tab.txt' OVERWRITE -INTO TABLE covar_tab_n0 -POSTHOOK: type: LOAD -#### A masked pattern was here #### -POSTHOOK: Output: default@covar_tab_n0 -PREHOOK: query: DESCRIBE FUNCTION corr -PREHOOK: type: DESCFUNCTION -POSTHOOK: query: DESCRIBE FUNCTION corr -POSTHOOK: type: DESCFUNCTION -corr(y,x) - Returns the Pearson coefficient of correlation -between a set of number pairs -PREHOOK: query: DESCRIBE FUNCTION EXTENDED corr -PREHOOK: type: DESCFUNCTION -POSTHOOK: query: DESCRIBE FUNCTION EXTENDED corr -POSTHOOK: type: DESCFUNCTION -corr(y,x) - Returns the Pearson coefficient of correlation -between a set of number pairs -The function takes as arguments any pair of numeric types and returns a double. -Any pair with a NULL is ignored. -If applied to an empty set: NULL is returned. -If N*SUM(x*x) = SUM(x)*SUM(x): NULL is returned. -If N*SUM(y*y) = SUM(y)*SUM(y): NULL is returned. -Otherwise, it computes the following: - COVAR_POP(x,y)/(STDDEV_POP(x)*STDDEV_POP(y)) -where neither x nor y is null, -COVAR_POP is the population covariance, -and STDDEV_POP is the population standard deviation. 
-Function class:org.apache.hadoop.hive.ql.udf.generic.GenericUDAFCorrelation -Function type:BUILTIN -PREHOOK: query: SELECT corr(b, c) FROM covar_tab_n0 WHERE a < 1 -PREHOOK: type: QUERY -PREHOOK: Input: default@covar_tab_n0 -#### A masked pattern was here #### -POSTHOOK: query: SELECT corr(b, c) FROM covar_tab_n0 WHERE a < 1 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@covar_tab_n0 -#### A masked pattern was here #### -NULL -PREHOOK: query: SELECT corr(b, c) FROM covar_tab_n0 WHERE a < 3 -PREHOOK: type: QUERY -PREHOOK: Input: default@covar_tab_n0 -#### A masked pattern was here #### -POSTHOOK: query: SELECT corr(b, c) FROM covar_tab_n0 WHERE a < 3 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@covar_tab_n0 -#### A masked pattern was here #### -NULL -PREHOOK: query: SELECT corr(b, c) FROM covar_tab_n0 WHERE a = 3 -PREHOOK: type: QUERY -PREHOOK: Input: default@covar_tab_n0 -#### A masked pattern was here #### -POSTHOOK: query: SELECT corr(b, c) FROM covar_tab_n0 WHERE a = 3 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@covar_tab_n0 -#### A masked pattern was here #### -NULL -PREHOOK: query: SELECT a, corr(b, c) FROM covar_tab_n0 GROUP BY a ORDER BY a -PREHOOK: type: QUERY -PREHOOK: Input: default@covar_tab_n0 -#### A masked pattern was here #### -POSTHOOK: query: SELECT a, corr(b, c) FROM covar_tab_n0 GROUP BY a ORDER BY a -POSTHOOK: type: QUERY -POSTHOOK: Input: default@covar_tab_n0 -#### A masked pattern was here #### -1 NULL -2 NULL -3 NULL -4 NULL -5 NULL -6 NULL -PREHOOK: query: SELECT corr(b, c) FROM covar_tab_n0 -PREHOOK: type: QUERY -PREHOOK: Input: default@covar_tab_n0 -#### A masked pattern was here #### -POSTHOOK: query: SELECT corr(b, c) FROM covar_tab_n0 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@covar_tab_n0 -#### A masked pattern was here #### -0.6633880657639326 -PREHOOK: query: DROP TABLE covar_tab_n0 -PREHOOK: type: DROPTABLE -PREHOOK: Input: default@covar_tab_n0 -PREHOOK: Output: default@covar_tab_n0 -POSTHOOK: query: DROP TABLE covar_tab_n0 -POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@covar_tab_n0 -POSTHOOK: Output: default@covar_tab_n0 diff --git ql/src/test/results/clientpositive/udaf_histogram_numeric.q.out ql/src/test/results/clientpositive/udaf_histogram_numeric.q.out deleted file mode 100644 index 0cecd1e3a3..0000000000 --- ql/src/test/results/clientpositive/udaf_histogram_numeric.q.out +++ /dev/null @@ -1,36 +0,0 @@ -PREHOOK: query: SELECT histogram_numeric(cast(substr(src.value,5) AS double), 2) FROM src -PREHOOK: type: QUERY -PREHOOK: Input: default@src -#### A masked pattern was here #### -POSTHOOK: query: SELECT histogram_numeric(cast(substr(src.value,5) AS double), 2) FROM src -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src -#### A masked pattern was here #### -[{"x":135.0284552845532,"y":246.0},{"x":381.39370078740143,"y":254.0}] -PREHOOK: query: SELECT histogram_numeric(cast(substr(src.value,5) AS double), 3) FROM src -PREHOOK: type: QUERY -PREHOOK: Input: default@src -#### A masked pattern was here #### -POSTHOOK: query: SELECT histogram_numeric(cast(substr(src.value,5) AS double), 3) FROM src -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src -#### A masked pattern was here #### -[{"x":96.7349397590361,"y":166.0},{"x":257.14970059880255,"y":167.0},{"x":425.6826347305388,"y":167.0}] -PREHOOK: query: SELECT histogram_numeric(cast(substr(src.value,5) AS double), 20) FROM src -PREHOOK: type: QUERY -PREHOOK: Input: default@src -#### A masked pattern was here #### -POSTHOOK: query: SELECT histogram_numeric(cast(substr(src.value,5) 
AS double), 20) FROM src -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src -#### A masked pattern was here #### -[{"x":9.761904761904763,"y":21.0},{"x":33.84210526315789,"y":19.0},{"x":62.75000000000001,"y":20.0},{"x":90.90322580645162,"y":31.0},{"x":122.91666666666667,"y":24.0},{"x":146.33333333333334,"y":21.0},{"x":170.70967741935485,"y":31.0},{"x":194.3571428571428,"y":28.0},{"x":214.84615384615384,"y":26.0},{"x":235.08695652173907,"y":23.0},{"x":257.80000000000007,"y":15.0},{"x":281.0333333333333,"y":30.0},{"x":298.0,"y":1.0},{"x":313.0000000000001,"y":29.0},{"x":339.5925925925926,"y":27.0},{"x":372.49999999999983,"y":24.0},{"x":402.23684210526324,"y":38.0},{"x":430.6896551724138,"y":29.0},{"x":462.32352941176464,"y":34.0},{"x":487.72413793103453,"y":29.0}] -PREHOOK: query: SELECT histogram_numeric(cast(substr(src.value,5) AS double), 200) FROM src -PREHOOK: type: QUERY -PREHOOK: Input: default@src -#### A masked pattern was here #### -POSTHOOK: query: SELECT histogram_numeric(cast(substr(src.value,5) AS double), 200) FROM src -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src -#### A masked pattern was here #### -[{"x":0.0,"y":3.0},{"x":2.0,"y":1.0},{"x":4.75,"y":4.0},{"x":8.0,"y":1.0},{"x":9.5,"y":2.0},{"x":11.666666666666666,"y":3.0},{"x":15.0,"y":2.0},{"x":17.666666666666664,"y":3.0},{"x":19.5,"y":2.0},{"x":24.0,"y":2.0},{"x":26.333333333333336,"y":3.0},{"x":28.0,"y":1.0},{"x":30.0,"y":1.0},{"x":33.0,"y":1.0},{"x":34.75,"y":4.0},{"x":37.0,"y":2.0},{"x":41.666666666666664,"y":3.0},{"x":43.5,"y":2.0},{"x":47.0,"y":1.0},{"x":51.0,"y":2.0},{"x":53.5,"y":2.0},{"x":57.666666666666664,"y":3.0},{"x":64.5,"y":2.0},{"x":66.66666666666666,"y":3.0},{"x":69.75,"y":4.0},{"x":72.0,"y":2.0},{"x":74.0,"y":1.0},{"x":76.33333333333333,"y":3.0},{"x":78.0,"y":1.0},{"x":80.0,"y":1.0},{"x":82.0,"y":1.0},{"x":83.5,"y":4.0},{"x":85.5,"y":2.0},{"x":87.0,"y":1.0},{"x":90.0,"y":3.0},{"x":92.0,"y":1.0},{"x":95.33333333333333,"y":3.0},{"x":97.5,"y":4.0},{"x":100.0,"y":2.0},{"x":103.5,"y":4.0},{"x":105.0,"y":1.0},{"x":111.0,"y":1.0},{"x":113.33333333333333,"y":3.0},{"x":116.0,"y":1.0},{"x":118.0,"y":2.0},{"x":119.4,"y":5.0},{"x":125.33333333333333,"y":3.0},{"x":128.4,"y":5.0},{"x":131.0,"y":1.0},{"x":133.66666666666666,"y":3.0},{"x":136.66666666666666,"y":3.0},{"x":138.0,"y":4.0},{"x":143.0,"y":1.0},{"x":145.66666666666666,"y":3.0},{"x":149.33333333333331,"y":3.0},{"x":152.33333333333334,"y":3.0},{"x":155.5,"y":2.0},{"x":157.5,"y":2.0},{"x":160.0,"y":1.0},{"x":162.5,"y":2.0},{"x":164.5,"y":4.0},{"x":166.75,"y":4.0},{"x":168.8,"y":5.0},{"x":170.0,"y":1.0},{"x":172.0,"y":2.0},{"x":174.5,"y":4.0},{"x":176.33333333333331,"y":3.0},{"x":178.0,"y":1.0},{"x":179.33333333333331,"y":3.0},{"x":181.0,"y":1.0},{"x":183.0,"y":1.0},{"x":186.75,"y":4.0},{"x":189.0,"y":1.0},{"x":190.66666666666666,"y":3.0},{"x":192.75,"y":4.0},{"x":194.0,"y":1.0},{"x":195.33333333333331,"y":3.0},{"x":197.0,"y":2.0},{"x":199.4,"y":5.0},{"x":201.0,"y":1.0},{"x":202.66666666666669,"y":3.0},{"x":205.0,"y":2.0},{"x":207.0,"y":2.0},{"x":208.40000000000003,"y":5.0},{"x":213.33333333333331,"y":3.0},{"x":216.0,"y":2.0},{"x":217.33333333333331,"y":3.0},{"x":219.0,"y":2.0},{"x":221.33333333333331,"y":3.0},{"x":223.5,"y":4.0},{"x":226.0,"y":1.0},{"x":228.66666666666663,"y":3.0},{"x":230.0,"y":5.0},{"x":233.0,"y":2.0},{"x":235.0,"y":1.0},{"x":237.5,"y":4.0},{"x":239.0,"y":2.0},{"x":241.66666666666669,"y":3.0},{"x":244.0,"y":1.0},{"x":247.5,"y":2.0},{"x":249.0,"y":1.0},{"x":252.0,"y":1.0},{"x":255.5,"y":4.0},{"x":257.5,"y":2.0},{"x":260.0,"y":1.0},{"x":26
2.5,"y":2.0},{"x":265.3333333333333,"y":3.0},{"x":272.6,"y":5.0},{"x":274.5,"y":2.0},{"x":277.3333333333333,"y":6.0},{"x":280.0,"y":2.0},{"x":281.5,"y":4.0},{"x":283.5,"y":2.0},{"x":285.0,"y":1.0},{"x":286.5,"y":2.0},{"x":288.3333333333333,"y":3.0},{"x":291.5,"y":2.0},{"x":296.0,"y":1.0},{"x":298.0,"y":3.0},{"x":302.0,"y":1.0},{"x":305.5,"y":2.0},{"x":307.3333333333333,"y":3.0},{"x":309.0,"y":2.0},{"x":310.75,"y":4.0},{"x":315.75,"y":4.0},{"x":317.6,"y":5.0},{"x":321.5,"y":4.0},{"x":323.0,"y":1.0},{"x":325.0,"y":2.0},{"x":327.0,"y":3.0},{"x":331.3333333333333,"y":3.0},{"x":333.0,"y":2.0},{"x":335.5,"y":2.0},{"x":338.5,"y":2.0},{"x":341.66666666666663,"y":3.0},{"x":344.3333333333333,"y":3.0},{"x":348.0,"y":5.0},{"x":351.0,"y":1.0},{"x":353.0,"y":2.0},{"x":356.0,"y":1.0},{"x":360.0,"y":1.0},{"x":362.0,"y":1.0},{"x":364.5,"y":2.0},{"x":366.66666666666663,"y":3.0},{"x":368.75,"y":4.0},{"x":373.5,"y":2.0},{"x":375.0,"y":1.0},{"x":377.5,"y":2.0},{"x":379.0,"y":1.0},{"x":382.0,"y":2.0},{"x":384.0,"y":3.0},{"x":386.0,"y":1.0},{"x":389.0,"y":1.0},{"x":392.0,"y":1.0},{"x":393.5,"y":2.0},{"x":395.6,"y":5.0},{"x":397.0,"y":2.0},{"x":399.0,"y":2.0},{"x":400.0,"y":1.0},{"x":401.16666666666663,"y":6.0},{"x":403.40000000000003,"y":5.0},{"x":406.20000000000005,"y":5.0},{"x":409.0,"y":3.0},{"x":411.0,"y":1.0},{"x":413.5,"y":4.0},{"x":417.0,"y":3.0},{"x":418.5,"y":2.0},{"x":421.0,"y":1.0},{"x":424.0,"y":2.0},{"x":427.0,"y":1.0},{"x":429.6,"y":5.0},{"x":431.25,"y":4.0},{"x":435.5,"y":2.0},{"x":437.75,"y":4.0},{"x":439.0,"y":2.0},{"x":443.5,"y":2.0},{"x":446.0,"y":1.0},{"x":448.5,"y":2.0},{"x":452.5,"y":2.0},{"x":454.24999999999994,"y":4.0},{"x":457.66666666666663,"y":3.0},{"x":459.33333333333337,"y":3.0},{"x":462.5,"y":4.0},{"x":466.0,"y":3.0},{"x":467.80000000000007,"y":5.0},{"x":469.16666666666663,"y":6.0},{"x":472.0,"y":1.0},{"x":475.0,"y":1.0},{"x":477.0,"y":1.0},{"x":478.33333333333326,"y":3.0},{"x":480.25,"y":4.0},{"x":482.5,"y":2.0},{"x":484.5,"y":2.0},{"x":487.0,"y":1.0},{"x":489.2,"y":5.0},{"x":491.66666666666663,"y":3.0},{"x":493.0,"y":1.0},{"x":494.5,"y":2.0},{"x":496.0,"y":1.0},{"x":497.75,"y":4.0}] diff --git ql/src/test/results/clientpositive/udaf_percentile_approx_20.q.out ql/src/test/results/clientpositive/udaf_percentile_approx_20.q.out deleted file mode 100644 index 14e743efa1..0000000000 --- ql/src/test/results/clientpositive/udaf_percentile_approx_20.q.out +++ /dev/null @@ -1,491 +0,0 @@ -PREHOOK: query: -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.20, 0.20S) - -CREATE TABLE bucket (key double, value string) CLUSTERED BY (key) SORTED BY (key DESC) INTO 4 BUCKETS STORED AS TEXTFILE -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -POSTHOOK: query: -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.20, 0.20S) - -CREATE TABLE bucket (key double, value string) CLUSTERED BY (key) SORTED BY (key DESC) INTO 4 BUCKETS STORED AS TEXTFILE -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@bucket -PREHOOK: query: load data local inpath '../../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket -PREHOOK: type: LOAD -#### A masked pattern was here #### -PREHOOK: Output: default@bucket -POSTHOOK: query: load data local inpath '../../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket -POSTHOOK: type: LOAD -#### A masked pattern was here #### -POSTHOOK: Output: default@bucket -PREHOOK: query: load data local inpath '../../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket -PREHOOK: type: LOAD -#### A masked pattern was here #### -PREHOOK: Output: default@bucket 
-POSTHOOK: query: load data local inpath '../../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket -POSTHOOK: type: LOAD -#### A masked pattern was here #### -POSTHOOK: Output: default@bucket -PREHOOK: query: load data local inpath '../../data/files/srcsortbucket3outof4.txt' INTO TABLE bucket -PREHOOK: type: LOAD -#### A masked pattern was here #### -PREHOOK: Output: default@bucket -POSTHOOK: query: load data local inpath '../../data/files/srcsortbucket3outof4.txt' INTO TABLE bucket -POSTHOOK: type: LOAD -#### A masked pattern was here #### -POSTHOOK: Output: default@bucket -PREHOOK: query: load data local inpath '../../data/files/srcsortbucket4outof4.txt' INTO TABLE bucket -PREHOOK: type: LOAD -#### A masked pattern was here #### -PREHOOK: Output: default@bucket -POSTHOOK: query: load data local inpath '../../data/files/srcsortbucket4outof4.txt' INTO TABLE bucket -POSTHOOK: type: LOAD -#### A masked pattern was here #### -POSTHOOK: Output: default@bucket -PREHOOK: query: create table t1 (result double) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -POSTHOOK: query: create table t1 (result double) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@t1 -PREHOOK: query: create table t2 (result double) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -POSTHOOK: query: create table t2 (result double) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@t2 -PREHOOK: query: create table t3 (result double) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -POSTHOOK: query: create table t3 (result double) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@t3 -PREHOOK: query: create table t4 (result double) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -POSTHOOK: query: create table t4 (result double) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@t4 -PREHOOK: query: create table t5 (result double) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -POSTHOOK: query: create table t5 (result double) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@t5 -PREHOOK: query: create table t6 (result double) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -POSTHOOK: query: create table t6 (result double) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@t6 -PREHOOK: query: create table t7 (result array<double>) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -POSTHOOK: query: create table t7 (result array<double>) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@t7 -PREHOOK: query: create table t8 (result array<double>) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -POSTHOOK: query: create table t8 (result array<double>) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@t8 -PREHOOK: query: create table t9 (result array<double>) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -POSTHOOK: query: create table t9 (result array<double>) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@t9 -PREHOOK: query: create table t10 (result array<double>) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -POSTHOOK: query: create table t10 (result array<double>) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@t10 -PREHOOK:
query: create table t11 (result array<double>) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -POSTHOOK: query: create table t11 (result array<double>) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@t11 -PREHOOK: query: create table t12 (result array<double>) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -POSTHOOK: query: create table t12 (result array<double>) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@t12 -PREHOOK: query: -- disable map-side aggregation -FROM bucket -insert overwrite table t1 SELECT percentile_approx(cast(key AS double), 0.5) -insert overwrite table t2 SELECT percentile_approx(cast(key AS double), 0.5, 100) -insert overwrite table t3 SELECT percentile_approx(cast(key AS double), 0.5, 1000) - -insert overwrite table t4 SELECT percentile_approx(cast(key AS int), 0.5) -insert overwrite table t5 SELECT percentile_approx(cast(key AS int), 0.5, 100) -insert overwrite table t6 SELECT percentile_approx(cast(key AS int), 0.5, 1000) - -insert overwrite table t7 SELECT percentile_approx(cast(key AS double), array(0.05,0.5,0.95,0.98)) -insert overwrite table t8 SELECT percentile_approx(cast(key AS double), array(0.05,0.5,0.95,0.98), 100) -insert overwrite table t9 SELECT percentile_approx(cast(key AS double), array(0.05,0.5,0.95,0.98), 1000) - -insert overwrite table t10 SELECT percentile_approx(cast(key AS int), array(0.05,0.5,0.95,0.98)) -insert overwrite table t11 SELECT percentile_approx(cast(key AS int), array(0.05,0.5,0.95,0.98), 100) -insert overwrite table t12 SELECT percentile_approx(cast(key AS int), array(0.05,0.5,0.95,0.98), 1000) -PREHOOK: type: QUERY -PREHOOK: Input: default@bucket -PREHOOK: Output: default@t1 -PREHOOK: Output: default@t10 -PREHOOK: Output: default@t11 -PREHOOK: Output: default@t12 -PREHOOK: Output: default@t2 -PREHOOK: Output: default@t3 -PREHOOK: Output: default@t4 -PREHOOK: Output: default@t5 -PREHOOK: Output: default@t6 -PREHOOK: Output: default@t7 -PREHOOK: Output: default@t8 -PREHOOK: Output: default@t9 -POSTHOOK: query: -- disable map-side aggregation -FROM bucket -insert overwrite table t1 SELECT percentile_approx(cast(key AS double), 0.5) -insert overwrite table t2 SELECT percentile_approx(cast(key AS double), 0.5, 100) -insert overwrite table t3 SELECT percentile_approx(cast(key AS double), 0.5, 1000) - -insert overwrite table t4 SELECT percentile_approx(cast(key AS int), 0.5) -insert overwrite table t5 SELECT percentile_approx(cast(key AS int), 0.5, 100) -insert overwrite table t6 SELECT percentile_approx(cast(key AS int), 0.5, 1000) - -insert overwrite table t7 SELECT percentile_approx(cast(key AS double), array(0.05,0.5,0.95,0.98)) -insert overwrite table t8 SELECT percentile_approx(cast(key AS double), array(0.05,0.5,0.95,0.98), 100) -insert overwrite table t9 SELECT percentile_approx(cast(key AS double), array(0.05,0.5,0.95,0.98), 1000) - -insert overwrite table t10 SELECT percentile_approx(cast(key AS int), array(0.05,0.5,0.95,0.98)) -insert overwrite table t11 SELECT percentile_approx(cast(key AS int), array(0.05,0.5,0.95,0.98), 100) -insert overwrite table t12 SELECT percentile_approx(cast(key AS int), array(0.05,0.5,0.95,0.98), 1000) -POSTHOOK: type: QUERY -POSTHOOK: Input: default@bucket -POSTHOOK: Output: default@t1 -POSTHOOK: Output: default@t10 -POSTHOOK: Output: default@t11 -POSTHOOK: Output: default@t12 -POSTHOOK: Output: default@t2 -POSTHOOK: Output: default@t3 -POSTHOOK: Output: default@t4 -POSTHOOK: Output: default@t5 -POSTHOOK:
Output: default@t6 -POSTHOOK: Output: default@t7 -POSTHOOK: Output: default@t8 -POSTHOOK: Output: default@t9 -POSTHOOK: Lineage: t1.result EXPRESSION [(bucket)bucket.FieldSchema(name:key, type:double, comment:null), ] -POSTHOOK: Lineage: t10.result EXPRESSION [(bucket)bucket.FieldSchema(name:key, type:double, comment:null), ] -POSTHOOK: Lineage: t11.result EXPRESSION [(bucket)bucket.FieldSchema(name:key, type:double, comment:null), ] -POSTHOOK: Lineage: t12.result EXPRESSION [(bucket)bucket.FieldSchema(name:key, type:double, comment:null), ] -POSTHOOK: Lineage: t2.result EXPRESSION [(bucket)bucket.FieldSchema(name:key, type:double, comment:null), ] -POSTHOOK: Lineage: t3.result EXPRESSION [(bucket)bucket.FieldSchema(name:key, type:double, comment:null), ] -POSTHOOK: Lineage: t4.result EXPRESSION [(bucket)bucket.FieldSchema(name:key, type:double, comment:null), ] -POSTHOOK: Lineage: t5.result EXPRESSION [(bucket)bucket.FieldSchema(name:key, type:double, comment:null), ] -POSTHOOK: Lineage: t6.result EXPRESSION [(bucket)bucket.FieldSchema(name:key, type:double, comment:null), ] -POSTHOOK: Lineage: t7.result EXPRESSION [(bucket)bucket.FieldSchema(name:key, type:double, comment:null), ] -POSTHOOK: Lineage: t8.result EXPRESSION [(bucket)bucket.FieldSchema(name:key, type:double, comment:null), ] -POSTHOOK: Lineage: t9.result EXPRESSION [(bucket)bucket.FieldSchema(name:key, type:double, comment:null), ] -PREHOOK: query: select * from t1 -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -#### A masked pattern was here #### -POSTHOOK: query: select * from t1 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -#### A masked pattern was here #### -255.5 -PREHOOK: query: select * from t2 -PREHOOK: type: QUERY -PREHOOK: Input: default@t2 -#### A masked pattern was here #### -POSTHOOK: query: select * from t2 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t2 -#### A masked pattern was here #### -254.08333333333334 -PREHOOK: query: select * from t3 -PREHOOK: type: QUERY -PREHOOK: Input: default@t3 -#### A masked pattern was here #### -POSTHOOK: query: select * from t3 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t3 -#### A masked pattern was here #### -255.5 -PREHOOK: query: select * from t4 -PREHOOK: type: QUERY -PREHOOK: Input: default@t4 -#### A masked pattern was here #### -POSTHOOK: query: select * from t4 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t4 -#### A masked pattern was here #### -255.5 -PREHOOK: query: select * from t5 -PREHOOK: type: QUERY -PREHOOK: Input: default@t5 -#### A masked pattern was here #### -POSTHOOK: query: select * from t5 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t5 -#### A masked pattern was here #### -254.08333333333334 -PREHOOK: query: select * from t6 -PREHOOK: type: QUERY -PREHOOK: Input: default@t6 -#### A masked pattern was here #### -POSTHOOK: query: select * from t6 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t6 -#### A masked pattern was here #### -255.5 -PREHOOK: query: select * from t7 -PREHOOK: type: QUERY -PREHOOK: Input: default@t7 -#### A masked pattern was here #### -POSTHOOK: query: select * from t7 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t7 -#### A masked pattern was here #### -[26.0,255.5,479.0,491.0] -PREHOOK: query: select * from t8 -PREHOOK: type: QUERY -PREHOOK: Input: default@t8 -#### A masked pattern was here #### -POSTHOOK: query: select * from t8 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t8 -#### A masked pattern was here #### -[23.355555555555558,254.08333333333334,476.5612244897959,489.50000000000006] 
-PREHOOK: query: select * from t9 -PREHOOK: type: QUERY -PREHOOK: Input: default@t9 -#### A masked pattern was here #### -POSTHOOK: query: select * from t9 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t9 -#### A masked pattern was here #### -[26.0,255.5,479.0,491.0] -PREHOOK: query: select * from t10 -PREHOOK: type: QUERY -PREHOOK: Input: default@t10 -#### A masked pattern was here #### -POSTHOOK: query: select * from t10 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t10 -#### A masked pattern was here #### -[26.0,255.5,479.0,491.0] -PREHOOK: query: select * from t11 -PREHOOK: type: QUERY -PREHOOK: Input: default@t11 -#### A masked pattern was here #### -POSTHOOK: query: select * from t11 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t11 -#### A masked pattern was here #### -[23.355555555555558,254.08333333333334,476.5612244897959,489.50000000000006] -PREHOOK: query: select * from t12 -PREHOOK: type: QUERY -PREHOOK: Input: default@t12 -#### A masked pattern was here #### -POSTHOOK: query: select * from t12 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t12 -#### A masked pattern was here #### -[26.0,255.5,479.0,491.0] -PREHOOK: query: -- enable map-side aggregation -FROM bucket -insert overwrite table t1 SELECT percentile_approx(cast(key AS double), 0.5) -insert overwrite table t2 SELECT percentile_approx(cast(key AS double), 0.5, 100) -insert overwrite table t3 SELECT percentile_approx(cast(key AS double), 0.5, 1000) - -insert overwrite table t4 SELECT percentile_approx(cast(key AS int), 0.5) -insert overwrite table t5 SELECT percentile_approx(cast(key AS int), 0.5, 100) -insert overwrite table t6 SELECT percentile_approx(cast(key AS int), 0.5, 1000) - -insert overwrite table t7 SELECT percentile_approx(cast(key AS double), array(0.05,0.5,0.95,0.98)) -insert overwrite table t8 SELECT percentile_approx(cast(key AS double), array(0.05,0.5,0.95,0.98), 100) -insert overwrite table t9 SELECT percentile_approx(cast(key AS double), array(0.05,0.5,0.95,0.98), 1000) - -insert overwrite table t10 SELECT percentile_approx(cast(key AS int), array(0.05,0.5,0.95,0.98)) -insert overwrite table t11 SELECT percentile_approx(cast(key AS int), array(0.05,0.5,0.95,0.98), 100) -insert overwrite table t12 SELECT percentile_approx(cast(key AS int), array(0.05,0.5,0.95,0.98), 1000) -PREHOOK: type: QUERY -PREHOOK: Input: default@bucket -PREHOOK: Output: default@t1 -PREHOOK: Output: default@t10 -PREHOOK: Output: default@t11 -PREHOOK: Output: default@t12 -PREHOOK: Output: default@t2 -PREHOOK: Output: default@t3 -PREHOOK: Output: default@t4 -PREHOOK: Output: default@t5 -PREHOOK: Output: default@t6 -PREHOOK: Output: default@t7 -PREHOOK: Output: default@t8 -PREHOOK: Output: default@t9 -POSTHOOK: query: -- enable map-side aggregation -FROM bucket -insert overwrite table t1 SELECT percentile_approx(cast(key AS double), 0.5) -insert overwrite table t2 SELECT percentile_approx(cast(key AS double), 0.5, 100) -insert overwrite table t3 SELECT percentile_approx(cast(key AS double), 0.5, 1000) - -insert overwrite table t4 SELECT percentile_approx(cast(key AS int), 0.5) -insert overwrite table t5 SELECT percentile_approx(cast(key AS int), 0.5, 100) -insert overwrite table t6 SELECT percentile_approx(cast(key AS int), 0.5, 1000) - -insert overwrite table t7 SELECT percentile_approx(cast(key AS double), array(0.05,0.5,0.95,0.98)) -insert overwrite table t8 SELECT percentile_approx(cast(key AS double), array(0.05,0.5,0.95,0.98), 100) -insert overwrite table t9 SELECT percentile_approx(cast(key AS double), 
array(0.05,0.5,0.95,0.98), 1000) - -insert overwrite table t10 SELECT percentile_approx(cast(key AS int), array(0.05,0.5,0.95,0.98)) -insert overwrite table t11 SELECT percentile_approx(cast(key AS int), array(0.05,0.5,0.95,0.98), 100) -insert overwrite table t12 SELECT percentile_approx(cast(key AS int), array(0.05,0.5,0.95,0.98), 1000) -POSTHOOK: type: QUERY -POSTHOOK: Input: default@bucket -POSTHOOK: Output: default@t1 -POSTHOOK: Output: default@t10 -POSTHOOK: Output: default@t11 -POSTHOOK: Output: default@t12 -POSTHOOK: Output: default@t2 -POSTHOOK: Output: default@t3 -POSTHOOK: Output: default@t4 -POSTHOOK: Output: default@t5 -POSTHOOK: Output: default@t6 -POSTHOOK: Output: default@t7 -POSTHOOK: Output: default@t8 -POSTHOOK: Output: default@t9 -POSTHOOK: Lineage: t1.result EXPRESSION [(bucket)bucket.FieldSchema(name:key, type:double, comment:null), ] -POSTHOOK: Lineage: t10.result EXPRESSION [(bucket)bucket.FieldSchema(name:key, type:double, comment:null), ] -POSTHOOK: Lineage: t11.result EXPRESSION [(bucket)bucket.FieldSchema(name:key, type:double, comment:null), ] -POSTHOOK: Lineage: t12.result EXPRESSION [(bucket)bucket.FieldSchema(name:key, type:double, comment:null), ] -POSTHOOK: Lineage: t2.result EXPRESSION [(bucket)bucket.FieldSchema(name:key, type:double, comment:null), ] -POSTHOOK: Lineage: t3.result EXPRESSION [(bucket)bucket.FieldSchema(name:key, type:double, comment:null), ] -POSTHOOK: Lineage: t4.result EXPRESSION [(bucket)bucket.FieldSchema(name:key, type:double, comment:null), ] -POSTHOOK: Lineage: t5.result EXPRESSION [(bucket)bucket.FieldSchema(name:key, type:double, comment:null), ] -POSTHOOK: Lineage: t6.result EXPRESSION [(bucket)bucket.FieldSchema(name:key, type:double, comment:null), ] -POSTHOOK: Lineage: t7.result EXPRESSION [(bucket)bucket.FieldSchema(name:key, type:double, comment:null), ] -POSTHOOK: Lineage: t8.result EXPRESSION [(bucket)bucket.FieldSchema(name:key, type:double, comment:null), ] -POSTHOOK: Lineage: t9.result EXPRESSION [(bucket)bucket.FieldSchema(name:key, type:double, comment:null), ] -PREHOOK: query: select * from t1 -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -#### A masked pattern was here #### -POSTHOOK: query: select * from t1 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -#### A masked pattern was here #### -255.5 -PREHOOK: query: select * from t2 -PREHOOK: type: QUERY -PREHOOK: Input: default@t2 -#### A masked pattern was here #### -POSTHOOK: query: select * from t2 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t2 -#### A masked pattern was here #### -254.08333333333334 -PREHOOK: query: select * from t3 -PREHOOK: type: QUERY -PREHOOK: Input: default@t3 -#### A masked pattern was here #### -POSTHOOK: query: select * from t3 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t3 -#### A masked pattern was here #### -255.5 -PREHOOK: query: select * from t4 -PREHOOK: type: QUERY -PREHOOK: Input: default@t4 -#### A masked pattern was here #### -POSTHOOK: query: select * from t4 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t4 -#### A masked pattern was here #### -255.5 -PREHOOK: query: select * from t5 -PREHOOK: type: QUERY -PREHOOK: Input: default@t5 -#### A masked pattern was here #### -POSTHOOK: query: select * from t5 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t5 -#### A masked pattern was here #### -254.08333333333334 -PREHOOK: query: select * from t6 -PREHOOK: type: QUERY -PREHOOK: Input: default@t6 -#### A masked pattern was here #### -POSTHOOK: query: select * from t6 -POSTHOOK: type: QUERY -POSTHOOK: Input: 
default@t6 -#### A masked pattern was here #### -255.5 -PREHOOK: query: select * from t7 -PREHOOK: type: QUERY -PREHOOK: Input: default@t7 -#### A masked pattern was here #### -POSTHOOK: query: select * from t7 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t7 -#### A masked pattern was here #### -[26.0,255.5,479.0,491.0] -PREHOOK: query: select * from t8 -PREHOOK: type: QUERY -PREHOOK: Input: default@t8 -#### A masked pattern was here #### -POSTHOOK: query: select * from t8 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t8 -#### A masked pattern was here #### -[23.355555555555558,254.08333333333334,476.5612244897959,489.50000000000006] -PREHOOK: query: select * from t9 -PREHOOK: type: QUERY -PREHOOK: Input: default@t9 -#### A masked pattern was here #### -POSTHOOK: query: select * from t9 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t9 -#### A masked pattern was here #### -[26.0,255.5,479.0,491.0] -PREHOOK: query: select * from t10 -PREHOOK: type: QUERY -PREHOOK: Input: default@t10 -#### A masked pattern was here #### -POSTHOOK: query: select * from t10 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t10 -#### A masked pattern was here #### -[26.0,255.5,479.0,491.0] -PREHOOK: query: select * from t11 -PREHOOK: type: QUERY -PREHOOK: Input: default@t11 -#### A masked pattern was here #### -POSTHOOK: query: select * from t11 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t11 -#### A masked pattern was here #### -[23.355555555555558,254.08333333333334,476.5612244897959,489.50000000000006] -PREHOOK: query: select * from t12 -PREHOOK: type: QUERY -PREHOOK: Input: default@t12 -#### A masked pattern was here #### -POSTHOOK: query: select * from t12 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t12 -#### A masked pattern was here #### -[26.0,255.5,479.0,491.0] diff --git ql/src/test/results/clientpositive/udaf_percentile_cont_disc.q.out ql/src/test/results/clientpositive/udaf_percentile_cont_disc.q.out deleted file mode 100644 index 7400d0bd7b..0000000000 --- ql/src/test/results/clientpositive/udaf_percentile_cont_disc.q.out +++ /dev/null @@ -1,842 +0,0 @@ -PREHOOK: query: DESCRIBE FUNCTION percentile_cont -PREHOOK: type: DESCFUNCTION -POSTHOOK: query: DESCRIBE FUNCTION percentile_cont -POSTHOOK: type: DESCFUNCTION -percentile_cont(input, pc) - Returns the percentile of expr at pc (range: [0,1]). -PREHOOK: query: DESCRIBE FUNCTION EXTENDED percentile_cont -PREHOOK: type: DESCFUNCTION -POSTHOOK: query: DESCRIBE FUNCTION EXTENDED percentile_cont -POSTHOOK: type: DESCFUNCTION -percentile_cont(input, pc) - Returns the percentile of expr at pc (range: [0,1]). 
-Function class:org.apache.hadoop.hive.ql.udf.generic.GenericUDAFPercentileCont -Function type:BUILTIN -PREHOOK: query: SELECT CAST(key AS INT) DIV 10, - percentile_cont(CAST(substr(value, 5) AS INT), 0.0), - percentile_cont(CAST(substr(value, 5) AS INT), 0.5), - percentile_cont(CAST(substr(value, 5) AS INT), 1.0) -FROM src -GROUP BY CAST(key AS INT) DIV 10 -PREHOOK: type: QUERY -PREHOOK: Input: default@src -#### A masked pattern was here #### -POSTHOOK: query: SELECT CAST(key AS INT) DIV 10, - percentile_cont(CAST(substr(value, 5) AS INT), 0.0), - percentile_cont(CAST(substr(value, 5) AS INT), 0.5), - percentile_cont(CAST(substr(value, 5) AS INT), 1.0) -FROM src -GROUP BY CAST(key AS INT) DIV 10 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src -#### A masked pattern was here #### -0 0.0 4.5 9.0 -1 10.0 15.0 19.0 -10 100.0 103.0 105.0 -11 111.0 117.0 119.0 -12 120.0 127.0 129.0 -13 131.0 137.0 138.0 -14 143.0 146.0 149.0 -15 150.0 154.0 158.0 -16 160.0 166.5 169.0 -17 170.0 175.0 179.0 -18 180.0 186.5 189.0 -19 190.0 194.5 199.0 -2 20.0 26.0 28.0 -20 200.0 205.0 209.0 -21 213.0 216.5 219.0 -22 221.0 224.0 229.0 -23 230.0 234.0 239.0 -24 241.0 244.0 249.0 -25 252.0 256.0 258.0 -26 260.0 264.0 266.0 -27 272.0 275.0 278.0 -28 280.0 283.5 289.0 -29 291.0 297.0 298.0 -3 30.0 35.0 37.0 -30 302.0 307.0 309.0 -31 310.0 316.0 318.0 -32 321.0 324.0 327.0 -33 331.0 333.0 339.0 -34 341.0 345.0 348.0 -35 351.0 353.0 356.0 -36 360.0 367.0 369.0 -37 373.0 376.0 379.0 -38 382.0 384.0 389.0 -39 392.0 396.0 399.0 -4 41.0 42.5 47.0 -40 400.0 403.5 409.0 -41 411.0 415.5 419.0 -42 421.0 425.5 429.0 -43 430.0 435.0 439.0 -44 443.0 446.0 449.0 -45 452.0 455.0 459.0 -46 460.0 467.5 469.0 -47 470.0 477.0 479.0 -48 480.0 484.0 489.0 -49 490.0 494.5 498.0 -5 51.0 54.0 58.0 -6 64.0 66.5 69.0 -7 70.0 73.0 78.0 -8 80.0 84.0 87.0 -9 90.0 95.0 98.0 -PREHOOK: query: SELECT CAST(key AS INT) DIV 10, - percentile_cont(CAST(substr(value, 5) AS INT), 0.0), - percentile_cont(CAST(substr(value, 5) AS INT), 0.5), - percentile_cont(CAST(substr(value, 5) AS INT), 1.0) -FROM src -GROUP BY CAST(key AS INT) DIV 10 -PREHOOK: type: QUERY -PREHOOK: Input: default@src -#### A masked pattern was here #### -POSTHOOK: query: SELECT CAST(key AS INT) DIV 10, - percentile_cont(CAST(substr(value, 5) AS INT), 0.0), - percentile_cont(CAST(substr(value, 5) AS INT), 0.5), - percentile_cont(CAST(substr(value, 5) AS INT), 1.0) -FROM src -GROUP BY CAST(key AS INT) DIV 10 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src -#### A masked pattern was here #### -0 0.0 4.5 9.0 -1 10.0 15.0 19.0 -10 100.0 103.0 105.0 -11 111.0 117.0 119.0 -12 120.0 127.0 129.0 -13 131.0 137.0 138.0 -14 143.0 146.0 149.0 -15 150.0 154.0 158.0 -16 160.0 166.5 169.0 -17 170.0 175.0 179.0 -18 180.0 186.5 189.0 -19 190.0 194.5 199.0 -2 20.0 26.0 28.0 -20 200.0 205.0 209.0 -21 213.0 216.5 219.0 -22 221.0 224.0 229.0 -23 230.0 234.0 239.0 -24 241.0 244.0 249.0 -25 252.0 256.0 258.0 -26 260.0 264.0 266.0 -27 272.0 275.0 278.0 -28 280.0 283.5 289.0 -29 291.0 297.0 298.0 -3 30.0 35.0 37.0 -30 302.0 307.0 309.0 -31 310.0 316.0 318.0 -32 321.0 324.0 327.0 -33 331.0 333.0 339.0 -34 341.0 345.0 348.0 -35 351.0 353.0 356.0 -36 360.0 367.0 369.0 -37 373.0 376.0 379.0 -38 382.0 384.0 389.0 -39 392.0 396.0 399.0 -4 41.0 42.5 47.0 -40 400.0 403.5 409.0 -41 411.0 415.5 419.0 -42 421.0 425.5 429.0 -43 430.0 435.0 439.0 -44 443.0 446.0 449.0 -45 452.0 455.0 459.0 -46 460.0 467.5 469.0 -47 470.0 477.0 479.0 -48 480.0 484.0 489.0 -49 490.0 494.5 498.0 -5 51.0 54.0 58.0 -6 64.0 66.5 69.0 -7 
70.0 73.0 78.0 -8 80.0 84.0 87.0 -9 90.0 95.0 98.0 -PREHOOK: query: SELECT CAST(key AS INT) DIV 10, - percentile_cont(CAST(substr(value, 5) AS INT), 0.0), - percentile_cont(CAST(substr(value, 5) AS INT), 0.5), - percentile_cont(CAST(substr(value, 5) AS INT), 1.0) -FROM src -GROUP BY CAST(key AS INT) DIV 10 -PREHOOK: type: QUERY -PREHOOK: Input: default@src -#### A masked pattern was here #### -POSTHOOK: query: SELECT CAST(key AS INT) DIV 10, - percentile_cont(CAST(substr(value, 5) AS INT), 0.0), - percentile_cont(CAST(substr(value, 5) AS INT), 0.5), - percentile_cont(CAST(substr(value, 5) AS INT), 1.0) -FROM src -GROUP BY CAST(key AS INT) DIV 10 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src -#### A masked pattern was here #### -0 0.0 4.5 9.0 -1 10.0 15.0 19.0 -10 100.0 103.0 105.0 -11 111.0 117.0 119.0 -12 120.0 127.0 129.0 -13 131.0 137.0 138.0 -14 143.0 146.0 149.0 -15 150.0 154.0 158.0 -16 160.0 166.5 169.0 -17 170.0 175.0 179.0 -18 180.0 186.5 189.0 -19 190.0 194.5 199.0 -2 20.0 26.0 28.0 -20 200.0 205.0 209.0 -21 213.0 216.5 219.0 -22 221.0 224.0 229.0 -23 230.0 234.0 239.0 -24 241.0 244.0 249.0 -25 252.0 256.0 258.0 -26 260.0 264.0 266.0 -27 272.0 275.0 278.0 -28 280.0 283.5 289.0 -29 291.0 297.0 298.0 -3 30.0 35.0 37.0 -30 302.0 307.0 309.0 -31 310.0 316.0 318.0 -32 321.0 324.0 327.0 -33 331.0 333.0 339.0 -34 341.0 345.0 348.0 -35 351.0 353.0 356.0 -36 360.0 367.0 369.0 -37 373.0 376.0 379.0 -38 382.0 384.0 389.0 -39 392.0 396.0 399.0 -4 41.0 42.5 47.0 -40 400.0 403.5 409.0 -41 411.0 415.5 419.0 -42 421.0 425.5 429.0 -43 430.0 435.0 439.0 -44 443.0 446.0 449.0 -45 452.0 455.0 459.0 -46 460.0 467.5 469.0 -47 470.0 477.0 479.0 -48 480.0 484.0 489.0 -49 490.0 494.5 498.0 -5 51.0 54.0 58.0 -6 64.0 66.5 69.0 -7 70.0 73.0 78.0 -8 80.0 84.0 87.0 -9 90.0 95.0 98.0 -PREHOOK: query: SELECT CAST(key AS INT) DIV 10, - percentile_cont(CAST(substr(value, 5) AS INT), 0.0), - percentile_cont(CAST(substr(value, 5) AS INT), 0.5), - percentile_cont(CAST(substr(value, 5) AS INT), 1.0) -FROM src -GROUP BY CAST(key AS INT) DIV 10 -PREHOOK: type: QUERY -PREHOOK: Input: default@src -#### A masked pattern was here #### -POSTHOOK: query: SELECT CAST(key AS INT) DIV 10, - percentile_cont(CAST(substr(value, 5) AS INT), 0.0), - percentile_cont(CAST(substr(value, 5) AS INT), 0.5), - percentile_cont(CAST(substr(value, 5) AS INT), 1.0) -FROM src -GROUP BY CAST(key AS INT) DIV 10 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src -#### A masked pattern was here #### -0 0.0 4.5 9.0 -1 10.0 15.0 19.0 -10 100.0 103.0 105.0 -11 111.0 117.0 119.0 -12 120.0 127.0 129.0 -13 131.0 137.0 138.0 -14 143.0 146.0 149.0 -15 150.0 154.0 158.0 -16 160.0 166.5 169.0 -17 170.0 175.0 179.0 -18 180.0 186.5 189.0 -19 190.0 194.5 199.0 -2 20.0 26.0 28.0 -20 200.0 205.0 209.0 -21 213.0 216.5 219.0 -22 221.0 224.0 229.0 -23 230.0 234.0 239.0 -24 241.0 244.0 249.0 -25 252.0 256.0 258.0 -26 260.0 264.0 266.0 -27 272.0 275.0 278.0 -28 280.0 283.5 289.0 -29 291.0 297.0 298.0 -3 30.0 35.0 37.0 -30 302.0 307.0 309.0 -31 310.0 316.0 318.0 -32 321.0 324.0 327.0 -33 331.0 333.0 339.0 -34 341.0 345.0 348.0 -35 351.0 353.0 356.0 -36 360.0 367.0 369.0 -37 373.0 376.0 379.0 -38 382.0 384.0 389.0 -39 392.0 396.0 399.0 -4 41.0 42.5 47.0 -40 400.0 403.5 409.0 -41 411.0 415.5 419.0 -42 421.0 425.5 429.0 -43 430.0 435.0 439.0 -44 443.0 446.0 449.0 -45 452.0 455.0 459.0 -46 460.0 467.5 469.0 -47 470.0 477.0 479.0 -48 480.0 484.0 489.0 -49 490.0 494.5 498.0 -5 51.0 54.0 58.0 -6 64.0 66.5 69.0 -7 70.0 73.0 78.0 -8 80.0 84.0 87.0 -9 90.0 95.0 98.0 
-PREHOOK: query: SELECT CAST(key AS INT) DIV 10, - percentile_cont(NULL, 0.0) -FROM src -GROUP BY CAST(key AS INT) DIV 10 -PREHOOK: type: QUERY -PREHOOK: Input: default@src -#### A masked pattern was here #### -POSTHOOK: query: SELECT CAST(key AS INT) DIV 10, - percentile_cont(NULL, 0.0) -FROM src -GROUP BY CAST(key AS INT) DIV 10 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src -#### A masked pattern was here #### -0 NULL -1 NULL -10 NULL -11 NULL -12 NULL -13 NULL -14 NULL -15 NULL -16 NULL -17 NULL -18 NULL -19 NULL -2 NULL -20 NULL -21 NULL -22 NULL -23 NULL -24 NULL -25 NULL -26 NULL -27 NULL -28 NULL -29 NULL -3 NULL -30 NULL -31 NULL -32 NULL -33 NULL -34 NULL -35 NULL -36 NULL -37 NULL -38 NULL -39 NULL -4 NULL -40 NULL -41 NULL -42 NULL -43 NULL -44 NULL -45 NULL -46 NULL -47 NULL -48 NULL -49 NULL -5 NULL -6 NULL -7 NULL -8 NULL -9 NULL -PREHOOK: query: SELECT CAST(key AS INT) DIV 10, - percentile_cont(IF(CAST(key AS INT) DIV 10 < 5, 1, NULL), 0.5) -FROM src -GROUP BY CAST(key AS INT) DIV 10 -PREHOOK: type: QUERY -PREHOOK: Input: default@src -#### A masked pattern was here #### -POSTHOOK: query: SELECT CAST(key AS INT) DIV 10, - percentile_cont(IF(CAST(key AS INT) DIV 10 < 5, 1, NULL), 0.5) -FROM src -GROUP BY CAST(key AS INT) DIV 10 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src -#### A masked pattern was here #### -0 1.0 -1 1.0 -10 NULL -11 NULL -12 NULL -13 NULL -14 NULL -15 NULL -16 NULL -17 NULL -18 NULL -19 NULL -2 1.0 -20 NULL -21 NULL -22 NULL -23 NULL -24 NULL -25 NULL -26 NULL -27 NULL -28 NULL -29 NULL -3 1.0 -30 NULL -31 NULL -32 NULL -33 NULL -34 NULL -35 NULL -36 NULL -37 NULL -38 NULL -39 NULL -4 1.0 -40 NULL -41 NULL -42 NULL -43 NULL -44 NULL -45 NULL -46 NULL -47 NULL -48 NULL -49 NULL -5 NULL -6 NULL -7 NULL -8 NULL -9 NULL -PREHOOK: query: select percentile_cont(cast(key as bigint), 0.5) from src where false -PREHOOK: type: QUERY -PREHOOK: Input: default@src -#### A masked pattern was here #### -POSTHOOK: query: select percentile_cont(cast(key as bigint), 0.5) from src where false -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src -#### A masked pattern was here #### -NULL -PREHOOK: query: DESCRIBE FUNCTION percentile_disc -PREHOOK: type: DESCFUNCTION -POSTHOOK: query: DESCRIBE FUNCTION percentile_disc -POSTHOOK: type: DESCFUNCTION -There is no documentation for function 'percentile_disc' -PREHOOK: query: DESCRIBE FUNCTION EXTENDED percentile_disc -PREHOOK: type: DESCFUNCTION -POSTHOOK: query: DESCRIBE FUNCTION EXTENDED percentile_disc -POSTHOOK: type: DESCFUNCTION -There is no documentation for function 'percentile_disc' -Function class:org.apache.hadoop.hive.ql.udf.generic.GenericUDAFPercentileDisc -Function type:BUILTIN -PREHOOK: query: SELECT CAST(key AS INT) DIV 10, - percentile_disc(CAST(substr(value, 5) AS INT), 0.0), - percentile_disc(CAST(substr(value, 5) AS INT), 0.5), - percentile_disc(CAST(substr(value, 5) AS INT), 1.0) -FROM src -GROUP BY CAST(key AS INT) DIV 10 -PREHOOK: type: QUERY -PREHOOK: Input: default@src -#### A masked pattern was here #### -POSTHOOK: query: SELECT CAST(key AS INT) DIV 10, - percentile_disc(CAST(substr(value, 5) AS INT), 0.0), - percentile_disc(CAST(substr(value, 5) AS INT), 0.5), - percentile_disc(CAST(substr(value, 5) AS INT), 1.0) -FROM src -GROUP BY CAST(key AS INT) DIV 10 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src -#### A masked pattern was here #### -0 0.0 4.5 9.0 -1 10.0 15.0 19.0 -10 100.0 103.0 105.0 -11 111.0 117.0 119.0 -12 120.0 127.0 129.0 -13 131.0 137.0 138.0 -14 143.0 146.0 149.0 -15 
150.0 154.0 158.0 -16 160.0 166.5 169.0 -17 170.0 175.0 179.0 -18 180.0 186.5 189.0 -19 190.0 194.5 199.0 -2 20.0 26.0 28.0 -20 200.0 205.0 209.0 -21 213.0 216.5 219.0 -22 221.0 224.0 229.0 -23 230.0 234.0 239.0 -24 241.0 244.0 249.0 -25 252.0 256.0 258.0 -26 260.0 264.0 266.0 -27 272.0 275.0 278.0 -28 280.0 283.5 289.0 -29 291.0 297.0 298.0 -3 30.0 35.0 37.0 -30 302.0 307.0 309.0 -31 310.0 316.0 318.0 -32 321.0 324.0 327.0 -33 331.0 333.0 339.0 -34 341.0 345.0 348.0 -35 351.0 353.0 356.0 -36 360.0 367.0 369.0 -37 373.0 376.0 379.0 -38 382.0 384.0 389.0 -39 392.0 396.0 399.0 -4 41.0 42.5 47.0 -40 400.0 403.5 409.0 -41 411.0 415.5 419.0 -42 421.0 425.5 429.0 -43 430.0 435.0 439.0 -44 443.0 446.0 449.0 -45 452.0 455.0 459.0 -46 460.0 467.5 469.0 -47 470.0 477.0 479.0 -48 480.0 484.0 489.0 -49 490.0 494.5 498.0 -5 51.0 54.0 58.0 -6 64.0 66.5 69.0 -7 70.0 73.0 78.0 -8 80.0 84.0 87.0 -9 90.0 95.0 98.0 -PREHOOK: query: SELECT CAST(key AS INT) DIV 10, - percentile_disc(CAST(substr(value, 5) AS INT), 0.0), - percentile_disc(CAST(substr(value, 5) AS INT), 0.5), - percentile_disc(CAST(substr(value, 5) AS INT), 1.0) -FROM src -GROUP BY CAST(key AS INT) DIV 10 -PREHOOK: type: QUERY -PREHOOK: Input: default@src -#### A masked pattern was here #### -POSTHOOK: query: SELECT CAST(key AS INT) DIV 10, - percentile_disc(CAST(substr(value, 5) AS INT), 0.0), - percentile_disc(CAST(substr(value, 5) AS INT), 0.5), - percentile_disc(CAST(substr(value, 5) AS INT), 1.0) -FROM src -GROUP BY CAST(key AS INT) DIV 10 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src -#### A masked pattern was here #### -0 0.0 4.5 9.0 -1 10.0 15.0 19.0 -10 100.0 103.0 105.0 -11 111.0 117.0 119.0 -12 120.0 127.0 129.0 -13 131.0 137.0 138.0 -14 143.0 146.0 149.0 -15 150.0 154.0 158.0 -16 160.0 166.5 169.0 -17 170.0 175.0 179.0 -18 180.0 186.5 189.0 -19 190.0 194.5 199.0 -2 20.0 26.0 28.0 -20 200.0 205.0 209.0 -21 213.0 216.5 219.0 -22 221.0 224.0 229.0 -23 230.0 234.0 239.0 -24 241.0 244.0 249.0 -25 252.0 256.0 258.0 -26 260.0 264.0 266.0 -27 272.0 275.0 278.0 -28 280.0 283.5 289.0 -29 291.0 297.0 298.0 -3 30.0 35.0 37.0 -30 302.0 307.0 309.0 -31 310.0 316.0 318.0 -32 321.0 324.0 327.0 -33 331.0 333.0 339.0 -34 341.0 345.0 348.0 -35 351.0 353.0 356.0 -36 360.0 367.0 369.0 -37 373.0 376.0 379.0 -38 382.0 384.0 389.0 -39 392.0 396.0 399.0 -4 41.0 42.5 47.0 -40 400.0 403.5 409.0 -41 411.0 415.5 419.0 -42 421.0 425.5 429.0 -43 430.0 435.0 439.0 -44 443.0 446.0 449.0 -45 452.0 455.0 459.0 -46 460.0 467.5 469.0 -47 470.0 477.0 479.0 -48 480.0 484.0 489.0 -49 490.0 494.5 498.0 -5 51.0 54.0 58.0 -6 64.0 66.5 69.0 -7 70.0 73.0 78.0 -8 80.0 84.0 87.0 -9 90.0 95.0 98.0 -PREHOOK: query: SELECT CAST(key AS INT) DIV 10, - percentile_disc(CAST(substr(value, 5) AS INT), 0.0), - percentile_disc(CAST(substr(value, 5) AS INT), 0.5), - percentile_disc(CAST(substr(value, 5) AS INT), 1.0) -FROM src -GROUP BY CAST(key AS INT) DIV 10 -PREHOOK: type: QUERY -PREHOOK: Input: default@src -#### A masked pattern was here #### -POSTHOOK: query: SELECT CAST(key AS INT) DIV 10, - percentile_disc(CAST(substr(value, 5) AS INT), 0.0), - percentile_disc(CAST(substr(value, 5) AS INT), 0.5), - percentile_disc(CAST(substr(value, 5) AS INT), 1.0) -FROM src -GROUP BY CAST(key AS INT) DIV 10 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src -#### A masked pattern was here #### -0 0.0 4.5 9.0 -1 10.0 15.0 19.0 -10 100.0 103.0 105.0 -11 111.0 117.0 119.0 -12 120.0 127.0 129.0 -13 131.0 137.0 138.0 -14 143.0 146.0 149.0 -15 150.0 154.0 158.0 -16 160.0 166.5 169.0 -17 170.0 
175.0 179.0 -18 180.0 186.5 189.0 -19 190.0 194.5 199.0 -2 20.0 26.0 28.0 -20 200.0 205.0 209.0 -21 213.0 216.5 219.0 -22 221.0 224.0 229.0 -23 230.0 234.0 239.0 -24 241.0 244.0 249.0 -25 252.0 256.0 258.0 -26 260.0 264.0 266.0 -27 272.0 275.0 278.0 -28 280.0 283.5 289.0 -29 291.0 297.0 298.0 -3 30.0 35.0 37.0 -30 302.0 307.0 309.0 -31 310.0 316.0 318.0 -32 321.0 324.0 327.0 -33 331.0 333.0 339.0 -34 341.0 345.0 348.0 -35 351.0 353.0 356.0 -36 360.0 367.0 369.0 -37 373.0 376.0 379.0 -38 382.0 384.0 389.0 -39 392.0 396.0 399.0 -4 41.0 42.5 47.0 -40 400.0 403.5 409.0 -41 411.0 415.5 419.0 -42 421.0 425.5 429.0 -43 430.0 435.0 439.0 -44 443.0 446.0 449.0 -45 452.0 455.0 459.0 -46 460.0 467.5 469.0 -47 470.0 477.0 479.0 -48 480.0 484.0 489.0 -49 490.0 494.5 498.0 -5 51.0 54.0 58.0 -6 64.0 66.5 69.0 -7 70.0 73.0 78.0 -8 80.0 84.0 87.0 -9 90.0 95.0 98.0 -PREHOOK: query: SELECT CAST(key AS INT) DIV 10, - percentile_disc(CAST(substr(value, 5) AS INT), 0.0), - percentile_disc(CAST(substr(value, 5) AS INT), 0.5), - percentile_disc(CAST(substr(value, 5) AS INT), 1.0) -FROM src -GROUP BY CAST(key AS INT) DIV 10 -PREHOOK: type: QUERY -PREHOOK: Input: default@src -#### A masked pattern was here #### -POSTHOOK: query: SELECT CAST(key AS INT) DIV 10, - percentile_disc(CAST(substr(value, 5) AS INT), 0.0), - percentile_disc(CAST(substr(value, 5) AS INT), 0.5), - percentile_disc(CAST(substr(value, 5) AS INT), 1.0) -FROM src -GROUP BY CAST(key AS INT) DIV 10 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src -#### A masked pattern was here #### -0 0.0 4.5 9.0 -1 10.0 15.0 19.0 -10 100.0 103.0 105.0 -11 111.0 117.0 119.0 -12 120.0 127.0 129.0 -13 131.0 137.0 138.0 -14 143.0 146.0 149.0 -15 150.0 154.0 158.0 -16 160.0 166.5 169.0 -17 170.0 175.0 179.0 -18 180.0 186.5 189.0 -19 190.0 194.5 199.0 -2 20.0 26.0 28.0 -20 200.0 205.0 209.0 -21 213.0 216.5 219.0 -22 221.0 224.0 229.0 -23 230.0 234.0 239.0 -24 241.0 244.0 249.0 -25 252.0 256.0 258.0 -26 260.0 264.0 266.0 -27 272.0 275.0 278.0 -28 280.0 283.5 289.0 -29 291.0 297.0 298.0 -3 30.0 35.0 37.0 -30 302.0 307.0 309.0 -31 310.0 316.0 318.0 -32 321.0 324.0 327.0 -33 331.0 333.0 339.0 -34 341.0 345.0 348.0 -35 351.0 353.0 356.0 -36 360.0 367.0 369.0 -37 373.0 376.0 379.0 -38 382.0 384.0 389.0 -39 392.0 396.0 399.0 -4 41.0 42.5 47.0 -40 400.0 403.5 409.0 -41 411.0 415.5 419.0 -42 421.0 425.5 429.0 -43 430.0 435.0 439.0 -44 443.0 446.0 449.0 -45 452.0 455.0 459.0 -46 460.0 467.5 469.0 -47 470.0 477.0 479.0 -48 480.0 484.0 489.0 -49 490.0 494.5 498.0 -5 51.0 54.0 58.0 -6 64.0 66.5 69.0 -7 70.0 73.0 78.0 -8 80.0 84.0 87.0 -9 90.0 95.0 98.0 -PREHOOK: query: SELECT CAST(key AS INT) DIV 10, - percentile_disc(NULL, 0.0) -FROM src -GROUP BY CAST(key AS INT) DIV 10 -PREHOOK: type: QUERY -PREHOOK: Input: default@src -#### A masked pattern was here #### -POSTHOOK: query: SELECT CAST(key AS INT) DIV 10, - percentile_disc(NULL, 0.0) -FROM src -GROUP BY CAST(key AS INT) DIV 10 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src -#### A masked pattern was here #### -0 NULL -1 NULL -10 NULL -11 NULL -12 NULL -13 NULL -14 NULL -15 NULL -16 NULL -17 NULL -18 NULL -19 NULL -2 NULL -20 NULL -21 NULL -22 NULL -23 NULL -24 NULL -25 NULL -26 NULL -27 NULL -28 NULL -29 NULL -3 NULL -30 NULL -31 NULL -32 NULL -33 NULL -34 NULL -35 NULL -36 NULL -37 NULL -38 NULL -39 NULL -4 NULL -40 NULL -41 NULL -42 NULL -43 NULL -44 NULL -45 NULL -46 NULL -47 NULL -48 NULL -49 NULL -5 NULL -6 NULL -7 NULL -8 NULL -9 NULL -PREHOOK: query: SELECT CAST(key AS INT) DIV 10, - percentile_disc(IF(CAST(key AS 
INT) DIV 10 < 5, 1, NULL), 0.5) -FROM src -GROUP BY CAST(key AS INT) DIV 10 -PREHOOK: type: QUERY -PREHOOK: Input: default@src -#### A masked pattern was here #### -POSTHOOK: query: SELECT CAST(key AS INT) DIV 10, - percentile_disc(IF(CAST(key AS INT) DIV 10 < 5, 1, NULL), 0.5) -FROM src -GROUP BY CAST(key AS INT) DIV 10 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src -#### A masked pattern was here #### -0 1.0 -1 1.0 -10 NULL -11 NULL -12 NULL -13 NULL -14 NULL -15 NULL -16 NULL -17 NULL -18 NULL -19 NULL -2 1.0 -20 NULL -21 NULL -22 NULL -23 NULL -24 NULL -25 NULL -26 NULL -27 NULL -28 NULL -29 NULL -3 1.0 -30 NULL -31 NULL -32 NULL -33 NULL -34 NULL -35 NULL -36 NULL -37 NULL -38 NULL -39 NULL -4 1.0 -40 NULL -41 NULL -42 NULL -43 NULL -44 NULL -45 NULL -46 NULL -47 NULL -48 NULL -49 NULL -5 NULL -6 NULL -7 NULL -8 NULL -9 NULL -PREHOOK: query: select percentile_disc(cast(key as bigint), 0.5) from src where false -PREHOOK: type: QUERY -PREHOOK: Input: default@src -#### A masked pattern was here #### -POSTHOOK: query: select percentile_disc(cast(key as bigint), 0.5) from src where false -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src -#### A masked pattern was here #### -NULL diff --git ql/src/test/results/clientpositive/union_stats.q.out ql/src/test/results/clientpositive/union_stats.q.out deleted file mode 100644 index 1acf5c858c..0000000000 --- ql/src/test/results/clientpositive/union_stats.q.out +++ /dev/null @@ -1,794 +0,0 @@ -PREHOOK: query: explain extended create table t as select * from src union all select * from src -PREHOOK: type: CREATETABLE_AS_SELECT -POSTHOOK: query: explain extended create table t as select * from src union all select * from src -POSTHOOK: type: CREATETABLE_AS_SELECT -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5 - Stage-4 - Stage-0 depends on stages: Stage-4, Stage-3, Stage-6 - Stage-9 depends on stages: Stage-0 - Stage-2 depends on stages: Stage-9 - Stage-3 - Stage-5 - Stage-6 depends on stages: Stage-5 - -STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: src - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - GatherStats: false - Select Operator - expressions: key (type: string), value (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - Union - Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - GlobalTableId: 1 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE -#### A masked pattern was here #### - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - columns key,value - columns.types string:string - name default.t - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.t - TotalFiles: 1 - GatherStats: true - MultiFileSpray: false - TableScan - alias: src - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - GatherStats: false - Select Operator - expressions: key (type: string), value (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 5312 Basic stats: 
COMPLETE Column stats: NONE - Union - Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - GlobalTableId: 1 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE -#### A masked pattern was here #### - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - columns key,value - columns.types string:string - name default.t - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.t - TotalFiles: 1 - GatherStats: true - MultiFileSpray: false - Path -> Alias: -#### A masked pattern was here #### - Path -> Partition: -#### A masked pattern was here #### - Partition - base file name: src - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}} - bucket_count -1 - bucketing_version 2 - column.name.delimiter , - columns key,value - columns.comments 'default','default' - columns.types string:string -#### A masked pattern was here #### - name default.src - numFiles 1 - numRows 500 - rawDataSize 5312 - serialization.ddl struct src { string key, string value} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 5812 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}} - bucket_count -1 - bucketing_version 2 - column.name.delimiter , - columns key,value - columns.comments 'default','default' - columns.types string:string -#### A masked pattern was here #### - name default.src - numFiles 1 - numRows 500 - rawDataSize 5312 - serialization.ddl struct src { string key, string value} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 5812 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.src - name: default.src - Truncated Path -> Alias: - /src [$hdt$_0-subquery1:src, $hdt$_0-subquery2:src] - - Stage: Stage-7 - Conditional Operator - - Stage: Stage-4 - Move Operator - files: - hdfs directory: true -#### A masked pattern was here #### - - Stage: Stage-0 - Move Operator - files: - hdfs directory: true -#### A masked pattern was here #### - - Stage: Stage-9 - Create Table Operator: - Create Table - columns: key string, value string - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat - serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.t - - Stage: Stage-2 - Stats Work - Basic Stats Work: -#### A masked pattern was here #### - - Stage: Stage-3 - Map Reduce - Map Operator Tree: - TableScan - GatherStats: false - File Output Operator - compressed: false - GlobalTableId: 0 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - table: - input format: org.apache.hadoop.mapred.TextInputFormat - 
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - columns key,value - columns.types string:string - name default.t - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.t - TotalFiles: 1 - GatherStats: false - MultiFileSpray: false - Path -> Alias: -#### A masked pattern was here #### - Path -> Partition: -#### A masked pattern was here #### - Partition - base file name: -ext-10004 - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - columns key,value - columns.types string:string - name default.t - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - columns key,value - columns.types string:string - name default.t - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.t - name: default.t - Truncated Path -> Alias: -#### A masked pattern was here #### - - Stage: Stage-5 - Map Reduce - Map Operator Tree: - TableScan - GatherStats: false - File Output Operator - compressed: false - GlobalTableId: 0 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - columns key,value - columns.types string:string - name default.t - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.t - TotalFiles: 1 - GatherStats: false - MultiFileSpray: false - Path -> Alias: -#### A masked pattern was here #### - Path -> Partition: -#### A masked pattern was here #### - Partition - base file name: -ext-10004 - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - columns key,value - columns.types string:string - name default.t - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - columns key,value - columns.types string:string - name default.t - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.t - name: default.t - Truncated Path -> Alias: -#### A masked pattern was here #### - - Stage: Stage-6 - Move Operator - files: - hdfs directory: true -#### A masked pattern was here #### - -PREHOOK: query: create table t as select * from src union all select * from src -PREHOOK: type: CREATETABLE_AS_SELECT -PREHOOK: Input: default@src -PREHOOK: Output: database:default -PREHOOK: Output: default@t -POSTHOOK: query: create table t as select * from src union all select * from src -POSTHOOK: type: CREATETABLE_AS_SELECT -POSTHOOK: Input: default@src -POSTHOOK: Output: database:default 
-POSTHOOK: Output: default@t
-POSTHOOK: Lineage: t.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: t.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: select count(1) from t
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t
-#### A masked pattern was here ####
-POSTHOOK: query: select count(1) from t
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t
-#### A masked pattern was here ####
-1000
-PREHOOK: query: desc formatted t
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@t
-POSTHOOK: query: desc formatted t
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@t
-# col_name	data_type	comment
-key	string
-value	string
-
-# Detailed Table Information
-Database:	default
-#### A masked pattern was here ####
-Retention:	0
-#### A masked pattern was here ####
-Table Type:	MANAGED_TABLE
-Table Parameters:
-	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
-	bucketing_version	2
-	numFiles	1
-	numRows	1000
-	rawDataSize	10624
-	totalSize	11624
-#### A masked pattern was here ####
-
-# Storage Information
-SerDe Library:	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-InputFormat:	org.apache.hadoop.mapred.TextInputFormat
-OutputFormat:	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-Compressed:	No
-Num Buckets:	-1
-Bucket Columns:	[]
-Sort Columns:	[]
-Storage Desc Params:
-	serialization.format	1
-PREHOOK: query: create table tt as select * from t union all select * from src
-PREHOOK: type: CREATETABLE_AS_SELECT
-PREHOOK: Input: default@src
-PREHOOK: Input: default@t
-PREHOOK: Output: database:default
-PREHOOK: Output: default@tt
-POSTHOOK: query: create table tt as select * from t union all select * from src
-POSTHOOK: type: CREATETABLE_AS_SELECT
-POSTHOOK: Input: default@src
-POSTHOOK: Input: default@t
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@tt
-POSTHOOK: Lineage: tt.key EXPRESSION [(t)t.FieldSchema(name:key, type:string, comment:null), (src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: tt.value EXPRESSION [(t)t.FieldSchema(name:value, type:string, comment:null), (src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: desc formatted tt
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@tt
-POSTHOOK: query: desc formatted tt
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@tt
-# col_name	data_type	comment
-key	string
-value	string
-
-# Detailed Table Information
-Database:	default
-#### A masked pattern was here ####
-Retention:	0
-#### A masked pattern was here ####
-Table Type:	MANAGED_TABLE
-Table Parameters:
-	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
-	bucketing_version	2
-	numFiles	1
-	numRows	1500
-	rawDataSize	15936
-	totalSize	17436
-#### A masked pattern was here ####
-
-# Storage Information
-SerDe Library:	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-InputFormat:	org.apache.hadoop.mapred.TextInputFormat
-OutputFormat:	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-Compressed:	No
-Num Buckets:	-1
-Bucket Columns:	[]
-Sort Columns:	[]
-Storage Desc Params:
-	serialization.format	1
-PREHOOK: query: drop table tt
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@tt
-PREHOOK: Output: default@tt
-POSTHOOK: query: drop table tt
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@tt
-POSTHOOK: Output: default@tt
-PREHOOK: query: create table tt as select * from src union all select * from t
-PREHOOK: type: CREATETABLE_AS_SELECT
-PREHOOK: Input: default@src
-PREHOOK: Input: default@t
-PREHOOK: Output: database:default
-PREHOOK: Output: default@tt
-POSTHOOK: query: create table tt as select * from src union all select * from t
-POSTHOOK: type: CREATETABLE_AS_SELECT
-POSTHOOK: Input: default@src
-POSTHOOK: Input: default@t
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@tt
-POSTHOOK: Lineage: tt.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (t)t.FieldSchema(name:key, type:string, comment:null), ]
-POSTHOOK: Lineage: tt.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), (t)t.FieldSchema(name:value, type:string, comment:null), ]
-PREHOOK: query: desc formatted tt
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@tt
-POSTHOOK: query: desc formatted tt
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@tt
-# col_name	data_type	comment
-key	string
-value	string
-
-# Detailed Table Information
-Database:	default
-#### A masked pattern was here ####
-Retention:	0
-#### A masked pattern was here ####
-Table Type:	MANAGED_TABLE
-Table Parameters:
-	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
-	bucketing_version	2
-	numFiles	1
-	numRows	1500
-	rawDataSize	15936
-	totalSize	17436
-#### A masked pattern was here ####
-
-# Storage Information
-SerDe Library:	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-InputFormat:	org.apache.hadoop.mapred.TextInputFormat
-OutputFormat:	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-Compressed:	No
-Num Buckets:	-1
-Bucket Columns:	[]
-Sort Columns:	[]
-Storage Desc Params:
-	serialization.format	1
-PREHOOK: query: create table t1 like src
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@t1
-POSTHOOK: query: create table t1 like src
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@t1
-PREHOOK: query: create table t2 like src
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@t2
-POSTHOOK: query: create table t2 like src
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@t2
-PREHOOK: query: explain from (select * from src union all select * from src)s
-insert overwrite table t1 select *
-insert overwrite table t2 select *
-PREHOOK: type: QUERY
-POSTHOOK: query: explain from (select * from src union all select * from src)s
-insert overwrite table t1 select *
-insert overwrite table t2 select *
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-2 is a root stage
-  Stage-8 depends on stages: Stage-2 , consists of Stage-5, Stage-4, Stage-6
-  Stage-5
-  Stage-0 depends on stages: Stage-5, Stage-4, Stage-7
-  Stage-3 depends on stages: Stage-0, Stage-10
-  Stage-4
-  Stage-6
-  Stage-7 depends on stages: Stage-6
-  Stage-1 depends on stages: Stage-2
-  Stage-9 depends on stages: Stage-1, Stage-10
-  Stage-10 depends on stages: Stage-2
-
-STAGE PLANS:
-  Stage: Stage-2
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: src
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: key (type: string), value (type: string)
-              outputColumnNames: _col0, _col1
-              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-              Union
-                Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-                  table:
-                      input format: org.apache.hadoop.mapred.TextInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                      name: default.t1
-                Select Operator
-                  expressions: _col0 (type: string), _col1 (type: string)
-                  outputColumnNames: key, value
-                  Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-                  Group By Operator
-                    aggregations: compute_stats(key, 'hll'), compute_stats(value, 'hll')
-                    mode: hash
-                    outputColumnNames: _col0, _col1
-                    Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: NONE
-                    Reduce Output Operator
-                      sort order: 
-                      Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: NONE
-                      value expressions: _col0 (type: struct), _col1 (type: struct)
-                File Output Operator
-                  compressed: false
-                  Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-                  table:
-                      input format: org.apache.hadoop.mapred.TextInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                      name: default.t2
-                Select Operator
-                  expressions: _col0 (type: string), _col1 (type: string)
-                  outputColumnNames: key, value
-                  Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-                  Group By Operator
-                    aggregations: compute_stats(key, 'hll'), compute_stats(value, 'hll')
-                    mode: hash
-                    outputColumnNames: _col0, _col1
-                    Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: NONE
-                    File Output Operator
-                      compressed: false
-                      table:
-                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                          serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-          TableScan
-            alias: src
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: key (type: string), value (type: string)
-              outputColumnNames: _col0, _col1
-              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-              Union
-                Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-                  table:
-                      input format: org.apache.hadoop.mapred.TextInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                      name: default.t1
-                Select Operator
-                  expressions: _col0 (type: string), _col1 (type: string)
-                  outputColumnNames: key, value
-                  Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-                  Group By Operator
-                    aggregations: compute_stats(key, 'hll'), compute_stats(value, 'hll')
-                    mode: hash
-                    outputColumnNames: _col0, _col1
-                    Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: NONE
-                    Reduce Output Operator
-                      sort order: 
-                      Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: NONE
-                      value expressions: _col0 (type: struct), _col1 (type: struct)
-                File Output Operator
-                  compressed: false
-                  Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-                  table:
-                      input format: org.apache.hadoop.mapred.TextInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                      name: default.t2
-                Select Operator
-                  expressions: _col0 (type: string), _col1 (type: string)
-                  outputColumnNames: key, value
-                  Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-                  Group By Operator
-                    aggregations: compute_stats(key, 'hll'), compute_stats(value, 'hll')
-                    mode: hash
-                    outputColumnNames: _col0, _col1
-                    Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: NONE
-                    File Output Operator
-                      compressed: false
-                      table:
-                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                          serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1)
-          mode: mergepartial
-          outputColumnNames: _col0, _col1
-          Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: NONE
-            table:
-                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-8
-    Conditional Operator
-
-  Stage: Stage-5
-    Move Operator
-      files:
-        hdfs directory: true
-#### A masked pattern was here ####
-
-  Stage: Stage-0
-    Move Operator
-      tables:
-          replace: true
-          table:
-              input format: org.apache.hadoop.mapred.TextInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.t1
-
-  Stage: Stage-3
-    Stats Work
-      Basic Stats Work:
-      Column Stats Desc:
-          Columns: key, value
-          Column Types: string, string
-          Table: default.t1
-
-  Stage: Stage-4
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            File Output Operator
-              compressed: false
-              table:
-                  input format: org.apache.hadoop.mapred.TextInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                  name: default.t1
-
-  Stage: Stage-6
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            File Output Operator
-              compressed: false
-              table:
-                  input format: org.apache.hadoop.mapred.TextInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                  name: default.t1
-
-  Stage: Stage-7
-    Move Operator
-      files:
-        hdfs directory: true
-#### A masked pattern was here ####
-
-  Stage: Stage-1
-    Move Operator
-      tables:
-          replace: true
-          table:
-              input format: org.apache.hadoop.mapred.TextInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.t2
-
-  Stage: Stage-9
-    Stats Work
-      Basic Stats Work:
-      Column Stats Desc:
-          Columns: key, value
-          Column Types: string, string
-          Table: default.t2
-
-  Stage: Stage-10
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            Reduce Output Operator
-              sort order: 
-              Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: NONE
-              value expressions: _col0 (type: struct), _col1 (type: struct)
-      Execution mode: vectorized
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1)
-          mode: mergepartial
-          outputColumnNames: _col0, _col1
-          Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: NONE
-            table:
-                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-PREHOOK: query: from (select * from src union all select * from src)s
-insert overwrite table t1 select *
-insert overwrite table t2 select *
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@t1
-PREHOOK: Output: default@t2
-POSTHOOK: query: from (select * from src union all select * from src)s
-insert overwrite table t1 select *
-insert overwrite table t2 select *
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@t1
-POSTHOOK: Output: default@t2
-POSTHOOK: Lineage: t1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: t1.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: t2.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: t2.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: desc formatted t1
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@t1
-POSTHOOK: query: desc formatted t1
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@t1
-# col_name	data_type	comment
-key	string	default
-value	string	default
-
-# Detailed Table Information
-Database:	default
-#### A masked pattern was here ####
-Retention:	0
-#### A masked pattern was here ####
-Table Type:	MANAGED_TABLE
-Table Parameters:
-	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
-	numFiles	1
-	numRows	1000
-	rawDataSize	10624
-	totalSize	11624
-#### A masked pattern was here ####
-
-# Storage Information
-SerDe Library:	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-InputFormat:	org.apache.hadoop.mapred.TextInputFormat
-OutputFormat:	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-Compressed:	No
-Num Buckets:	-1
-Bucket Columns:	[]
-Sort Columns:	[]
-Storage Desc Params:
-	serialization.format	1
-PREHOOK: query: desc formatted t2
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@t2
-POSTHOOK: query: desc formatted t2
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@t2
-# col_name	data_type	comment
-key	string	default
-value	string	default
-
-# Detailed Table Information
-Database:	default
-#### A masked pattern was here ####
-Retention:	0
-#### A masked pattern was here ####
-Table Type:	MANAGED_TABLE
-Table Parameters:
-	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
-	numFiles	1
-	numRows	1000
-	rawDataSize	10624
-	totalSize	11624
-#### A masked pattern was here ####
-
-# Storage Information
-SerDe Library:	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-InputFormat:	org.apache.hadoop.mapred.TextInputFormat
-OutputFormat:	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-Compressed:	No
-Num Buckets:	-1
-Bucket Columns:	[]
-Sort Columns:	[]
-Storage Desc Params:
-	serialization.format	1
-PREHOOK: query: select count(1) from t1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-#### A masked pattern was here ####
-POSTHOOK: query: select count(1) from t1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-#### A masked pattern was here ####
-1000