diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java index a0a5f54..63c0d7d 100644 --- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java +++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java @@ -636,6 +636,9 @@ HIVEJOINCACHESIZE("hive.join.cache.size", 25000, "How many rows in the joining tables (except the streaming table) should be cached in memory."), + // CBO related + HIVE_CBO_ENABLED("hive.cbo.enable", true, "Flag to control enabling Cost Based Optimizations using Optiq framework."), + // hive.mapjoin.bucket.cache.size has been replaced by hive.smbjoin.cache.row, // need to remove by hive .13. Also, do not change default (see SMB operator) HIVEMAPJOINBUCKETCACHESIZE("hive.mapjoin.bucket.cache.size", 100, ""), diff --git a/conf/hive-default.xml.template b/conf/hive-default.xml.template new file mode 100644 index 0000000..a3adeb3 --- /dev/null +++ b/conf/hive-default.xml.template @@ -0,0 +1,3216 @@ + + + + + + + + + + + hive.exec.script.wrapper + + + + + hive.exec.plan + + + + + hive.plan.serialization.format + kryo + + Query plan format serialization between client and task nodes. + Two supported values are : kryo and javaXML. Kryo is default. + + + + hive.exec.scratchdir + /tmp/hive-${system:user.name} + Scratch space for Hive jobs + + + hive.exec.local.scratchdir + ${system:java.io.tmpdir}/${system:user.name} + Local scratch space for Hive jobs + + + hive.scratch.dir.permission + 700 + + + + hive.exec.submitviachild + false + + + + hive.exec.submit.local.task.via.child + true + + Determines whether local tasks (typically mapjoin hashtable generation phase) runs in + separate JVM (true recommended) or not. + Avoids the overhead of spawning new JVM, but can lead to out-of-memory issues. + + + + hive.exec.script.maxerrsize + 100000 + + Maximum number of bytes a script is allowed to emit to standard error (per map-reduce task). + This prevents runaway scripts from filling logs partitions to capacity + + + + hive.exec.script.allow.partial.consumption + false + + When enabled, this option allows a user script to exit successfully without consuming + all the data from the standard input. + + + + stream.stderr.reporter.prefix + reporter: + Streaming jobs that log to standard error with this prefix can log counter or status information. + + + stream.stderr.reporter.enabled + true + Enable consumption of status and counter messages for streaming jobs. + + + hive.exec.compress.output + false + + This controls whether the final outputs of a query (to a local/HDFS file or a Hive table) is compressed. + The compression codec and other options are determined from Hadoop config variables mapred.output.compress* + + + + hive.exec.compress.intermediate + false + + This controls whether intermediate files produced by Hive between multiple map-reduce jobs are compressed. + The compression codec and other options are determined from Hadoop config variables mapred.output.compress* + + + + hive.intermediate.compression.codec + + + + + hive.intermediate.compression.type + + + + + hive.exec.reducers.bytes.per.reducer + 1000000000 + size per reducer.The default is 1G, i.e if the input size is 10G, it will use 10 reducers. + + + hive.exec.reducers.max + 999 + + max number of reducers will be used. If the one specified in the configuration parameter mapred.reduce.tasks is + negative, Hive will use this one as the max number of reducers when automatically determine number of reducers. 
+ + + + hive.exec.pre.hooks + + + Comma-separated list of pre-execution hooks to be invoked for each statement. + A pre-execution hook is specified as the name of a Java class which implements the + org.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext interface. + + + + hive.exec.post.hooks + + + Comma-separated list of post-execution hooks to be invoked for each statement. + A post-execution hook is specified as the name of a Java class which implements the + org.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext interface. + + + + hive.exec.failure.hooks + + + Comma-separated list of on-failure hooks to be invoked for each statement. + An on-failure hook is specified as the name of Java class which implements the + org.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext interface. + + + + hive.client.stats.publishers + + + Comma-separated list of statistics publishers to be invoked on counters on each job. + A client stats publisher is specified as the name of a Java class which implements the + org.apache.hadoop.hive.ql.stats.ClientStatsPublisher interface. + + + + hive.exec.parallel + false + Whether to execute jobs in parallel + + + hive.exec.parallel.thread.number + 8 + How many jobs at most can be executed in parallel + + + hive.mapred.reduce.tasks.speculative.execution + true + Whether speculative execution for reducers should be turned on. + + + hive.exec.counters.pull.interval + 1000 + + The interval with which to poll the JobTracker for the counters the running job. + The smaller it is the more load there will be on the jobtracker, the higher it is the less granular the caught will be. + + + + hive.exec.dynamic.partition + true + Whether or not to allow dynamic partitions in DML/DDL. + + + hive.exec.dynamic.partition.mode + strict + + In strict mode, the user must specify at least one static partition + in case the user accidentally overwrites all partitions. + + + + hive.exec.max.dynamic.partitions + 1000 + Maximum number of dynamic partitions allowed to be created in total. + + + hive.exec.max.dynamic.partitions.pernode + 100 + Maximum number of dynamic partitions allowed to be created in each mapper/reducer node. + + + hive.exec.max.created.files + 100000 + Maximum number of HDFS files created by all mappers/reducers in a MapReduce job. + + + hive.downloaded.resources.dir + ${system:java.io.tmpdir}/${hive.session.id}_resources + Temporary local directory for added resources in the remote file system. + + + hive.exec.default.partition.name + __HIVE_DEFAULT_PARTITION__ + + The default partition name in case the dynamic partition column value is null/empty string or any other values that cannot be escaped. + This value must not contain any special character used in HDFS URI (e.g., ':', '%', '/' etc). + The user has to be aware that the dynamic partition value should not contain this value to avoid confusions. + + + + hive.lockmgr.zookeeper.default.partition.name + __HIVE_DEFAULT_ZOOKEEPER_PARTITION__ + + + + hive.exec.show.job.failure.debug.info + true + + If a job fails, whether to provide a link in the CLI to the task with the + most failures, along with debugging hints if applicable. + + + + hive.exec.job.debug.capture.stacktraces + true + + Whether or not stack traces parsed from the task logs of a sampled failed task + for each failed job should be stored in the SessionState + + + + hive.exec.job.debug.timeout + 30000 + + + + hive.exec.tasklog.debug.timeout + 20000 + + + + hive.output.file.extension + + + String used as a file extension for output files. 
If not set, defaults to the codec extension for text files (e.g. ".gz"), or no extension otherwise. + + + + hive.exec.mode.local.auto + false + Let Hive determine whether to run in local mode automatically + + + hive.exec.mode.local.auto.inputbytes.max + 134217728 + When hive.exec.mode.local.auto is true, input bytes should be less than this for local mode. + + + hive.exec.mode.local.auto.input.files.max + 4 + When hive.exec.mode.local.auto is true, the number of tasks should be less than this for local mode. + + + hive.exec.drop.ignorenonexistent + true + Do not report an error if DROP TABLE/VIEW specifies a non-existent table/view + + + hive.ignore.mapjoin.hint + true + Ignore the mapjoin hint + + + hive.file.max.footer + 100 + maximum number of footer lines a user can define for a table file + + + hive.resultset.use.unique.column.names + true + + Make column names unique in the result set by qualifying column names with table alias if needed. + Table alias will be added to column names for queries of type "select *" or + if query explicitly uses table alias "select r1.x..". + + + + fs.har.impl + org.apache.hadoop.hive.shims.HiveHarFileSystem + The implementation for accessing Hadoop Archives. Note that this won't be applicable to Hadoop versions less than 0.20 + + + hive.metastore.metadb.dir + + + + + hive.metastore.warehouse.dir + /user/hive/warehouse + location of default database for the warehouse + + + hive.metastore.uris + + Thrift URI for the remote metastore. Used by metastore client to connect to remote metastore. + + + hive.metastore.connect.retries + 3 + Number of retries while opening a connection to metastore + + + hive.metastore.failure.retries + 1 + Number of retries upon failure of Thrift metastore calls + + + hive.metastore.client.connect.retry.delay + 1s + + Expects a time value with unit (d/day, h/hour, m/min, s/sec, ms/msec, us/usec, ns/nsec), which is sec if not specified. + Number of seconds for the client to wait between consecutive connection attempts + + + + hive.metastore.client.socket.timeout + 600s + + Expects a time value with unit (d/day, h/hour, m/min, s/sec, ms/msec, us/usec, ns/nsec), which is sec if not specified. + MetaStore Client socket timeout in seconds + + + + javax.jdo.option.ConnectionPassword + mine + password to use against metastore database + + + hive.metastore.ds.connection.url.hook + + Name of the hook to use for retrieving the JDO connection URL. If empty, the value in javax.jdo.option.ConnectionURL is used + + + javax.jdo.option.Multithreaded + true + Set this to true if multiple threads access metastore through JDO concurrently. + + + javax.jdo.option.ConnectionURL + jdbc:derby:;databaseName=metastore_db;create=true + JDBC connect string for a JDBC metastore + + + hive.hmshandler.retry.attempts + 1 + The number of times to retry an HMSHandler call if there was a connection error. + + + hive.hmshandler.retry.interval + 1000ms + + Expects a time value with unit (d/day, h/hour, m/min, s/sec, ms/msec, us/usec, ns/nsec), which is msec if not specified. + The time between HMSHandler retry attempts on failure. + + + + hive.hmshandler.force.reload.conf + false + + Whether to force reloading of the HMSHandler configuration (including + the connection URL) before the next metastore query that accesses the + datastore. Once reloaded, this value is reset to false. Used for + testing only. + + + + hive.metastore.server.min.threads + 200 + Minimum number of worker threads in the Thrift server's pool.
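As a concrete illustration of the metastore client settings above, a minimal hive-site.xml override for a remote metastore might look like the sketch below; the host, port, and values are illustrative placeholders, not part of this patch.
<property>
  <name>hive.metastore.uris</name>
  <value>thrift://metastore-host.example.com:9083</value> <!-- placeholder host:port -->
</property>
<property>
  <name>hive.metastore.connect.retries</name>
  <value>5</value> <!-- illustrative; the shipped default is 3 -->
</property>
<property>
  <name>hive.metastore.client.socket.timeout</name>
  <value>300s</value> <!-- uses the time-unit suffix convention described above; shipped default is 600s -->
</property>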
+ + + hive.metastore.server.max.threads + 100000 + Maximum number of worker threads in the Thrift server's pool. + + + hive.metastore.server.tcp.keepalive + true + Whether to enable TCP keepalive for the metastore server. Keepalive will prevent accumulation of half-open connections. + + + hive.metastore.archive.intermediate.original + _INTERMEDIATE_ORIGINAL + + Intermediate dir suffixes used for archiving. Not important what they + are, as long as collisions are avoided + + + + hive.metastore.archive.intermediate.archived + _INTERMEDIATE_ARCHIVED + + + + hive.metastore.archive.intermediate.extracted + _INTERMEDIATE_EXTRACTED + + + + hive.metastore.kerberos.keytab.file + + The path to the Kerberos Keytab file containing the metastore Thrift server's service principal. + + + hive.metastore.kerberos.principal + hive-metastore/_HOST@EXAMPLE.COM + + The service principal for the metastore Thrift server. + The special string _HOST will be replaced automatically with the correct host name. + + + + hive.metastore.sasl.enabled + false + If true, the metastore Thrift interface will be secured with SASL. Clients must authenticate with Kerberos. + + + hive.metastore.thrift.framed.transport.enabled + false + If true, the metastore Thrift interface will use TFramedTransport. When false (default) a standard TTransport is used. + + + hive.cluster.delegation.token.store.class + org.apache.hadoop.hive.thrift.MemoryTokenStore + The delegation token store implementation. Set to org.apache.hadoop.hive.thrift.ZooKeeperTokenStore for a load-balanced cluster. + + + hive.cluster.delegation.token.store.zookeeper.connectString + + The ZooKeeper token store connect string. + + + hive.cluster.delegation.token.store.zookeeper.znode + /hive/cluster/delegation + The root path for token store data. + + + hive.cluster.delegation.token.store.zookeeper.acl + + ACL for token store entries. Comma-separated list of all server principals for the cluster. + + + hive.metastore.cache.pinobjtypes + Table,StorageDescriptor,SerDeInfo,Partition,Database,Type,FieldSchema,Order + List of comma separated metastore object types that should be pinned in the cache + + + datanucleus.connectionPoolingType + BONECP + Specify connection pool library for datanucleus + + + datanucleus.validateTables + false + Validates existing schema against code. Turn this on if you want to verify existing schema + + + datanucleus.validateColumns + false + Validates existing schema against code. Turn this on if you want to verify existing schema + + + datanucleus.validateConstraints + false + Validates existing schema against code. Turn this on if you want to verify existing schema + + + datanucleus.storeManagerType + rdbms + metadata store type + + + datanucleus.autoCreateSchema + true + Creates necessary schema on startup if one doesn't exist. Set this to false after creating it once + + + datanucleus.fixedDatastore + false + + + + hive.metastore.schema.verification + false + + Enforce metastore schema version consistency. + True: Verify that version information stored in metastore matches with one from Hive jars. Also disable automatic + schema migration attempt. Users are required to manually migrate schema after Hive upgrade which ensures + proper metastore schema migration. (Default) + False: Warn if the version information stored in metastore doesn't match with one from Hive jars.
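Tying the Kerberos-related settings above together, a hedged sketch of a secured metastore configuration could look like this; the keytab path is an illustrative placeholder.
<property>
  <name>hive.metastore.sasl.enabled</name>
  <value>true</value> <!-- secure the Thrift interface with SASL; clients then authenticate via Kerberos -->
</property>
<property>
  <name>hive.metastore.kerberos.keytab.file</name>
  <value>/etc/security/keytabs/hive.service.keytab</value> <!-- placeholder path -->
</property>
<property>
  <name>hive.metastore.kerberos.principal</name>
  <value>hive-metastore/_HOST@EXAMPLE.COM</value> <!-- _HOST is replaced with the actual host name, as described above -->
</property>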
+ + + + datanucleus.autoStartMechanismMode + checked + throw exception if metadata tables are incorrect + + + datanucleus.transactionIsolation + read-committed + Default transaction isolation level for identity generation. + + + datanucleus.cache.level2 + false + Use a level 2 cache. Turn this off if metadata is changed independently of Hive metastore server + + + datanucleus.cache.level2.type + none + + + + datanucleus.identifierFactory + datanucleus1 + + Name of the identifier factory to use when generating table/column names etc. + 'datanucleus1' is used for backward compatibility with DataNucleus v1 + + + + datanucleus.rdbms.useLegacyNativeValueStrategy + true + + + + datanucleus.plugin.pluginRegistryBundleCheck + LOG + Defines what happens when plugin bundles are found and are duplicated [EXCEPTION|LOG|NONE] + + + hive.metastore.batch.retrieve.max + 300 + + Maximum number of objects (tables/partitions) that can be retrieved from metastore in one batch. + The higher the number, the fewer round trips are needed to the Hive metastore server, + but it may also cause higher memory requirement at the client side. + + + + hive.metastore.batch.retrieve.table.partition.max + 1000 + Maximum number of table partitions that metastore internally retrieves in one batch. + + + hive.metastore.init.hooks + + + A comma separated list of hooks to be invoked at the beginning of HMSHandler initialization. + An init hook is specified as the name of a Java class which extends org.apache.hadoop.hive.metastore.MetaStoreInitListener. + + + + hive.metastore.pre.event.listeners + + List of comma separated listeners for metastore events. + + + hive.metastore.event.listeners + + + + + hive.metastore.authorization.storage.checks + false + + Should the metastore do authorization checks against the underlying storage (usually hdfs) + for operations like drop-partition (disallow the drop-partition if the user in + question doesn't have permissions to delete the corresponding directory + on the storage). + + + + hive.metastore.event.clean.freq + 0s + + Expects a time value with unit (d/day, h/hour, m/min, s/sec, ms/msec, us/usec, ns/nsec), which is sec if not specified. + Frequency at which timer task runs to purge expired events in metastore. + + + + hive.metastore.event.expiry.duration + 0s + + Expects a time value with unit (d/day, h/hour, m/min, s/sec, ms/msec, us/usec, ns/nsec), which is sec if not specified. + Duration after which events expire from events table + + + + hive.metastore.execute.setugi + true + + In unsecure mode, setting this property to true will cause the metastore to execute DFS operations using + the client's reported user and group permissions. Note that this property must be set on + both the client and server sides. Further note that it's best effort. + If the client sets it to true and the server sets it to false, the client setting will be ignored. + + + + hive.metastore.partition.name.whitelist.pattern + + Partition names will be checked against this regex pattern and rejected if not matched. + + + hive.metastore.integral.jdo.pushdown + false + + Allow JDO query pushdown for integral partition columns in metastore. Off by default. This + improves metastore perf for integral columns, especially if there's a large number of partitions. + However, it doesn't work correctly with integral values that are not normalized (e.g. have + leading zeroes, like 0012). If metastore direct SQL is enabled and works, this optimization + is also irrelevant.
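For instance, the partition name whitelist described above could be set to a pattern such as the following; the regex itself is only an illustration of the mechanism.
<property>
  <name>hive.metastore.partition.name.whitelist.pattern</name>
  <value>[A-Za-z0-9_\-]+</value> <!-- illustrative: reject partition names containing special characters -->
</property>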
+ + + + hive.metastore.try.direct.sql + true + + + + hive.metastore.try.direct.sql.ddl + true + + + + hive.metastore.disallow.incompatible.col.type.changes + false + + If true (default is false), ALTER TABLE operations which change the type of + a column (say STRING) to an incompatible type (say MAP<STRING, STRING>) are disallowed. + RCFile default SerDe (ColumnarSerDe) serializes the values in such a way that the + datatypes can be converted from string to any type. The map is also serialized as + a string, which can be read as a string as well. However, with any binary + serialization, this is not true. Blocking the ALTER TABLE prevents ClassCastExceptions + when subsequently trying to access old partitions. + + Primitive types like INT, STRING, BIGINT, etc. are compatible with each other and are + not blocked. + + See HIVE-4409 for more details. + + + + hive.table.parameters.default + + Default property values for newly created tables + + + hive.ddl.createtablelike.properties.whitelist + + Table Properties to copy over when executing a Create Table Like. + + + hive.metastore.rawstore.impl + org.apache.hadoop.hive.metastore.ObjectStore + + Name of the class that implements org.apache.hadoop.hive.metastore.rawstore interface. + This class is used to store and retrieve raw metadata objects such as tables and databases + + + + javax.jdo.option.ConnectionDriverName + org.apache.derby.jdbc.EmbeddedDriver + Driver class name for a JDBC metastore + + + javax.jdo.PersistenceManagerFactoryClass + org.datanucleus.api.jdo.JDOPersistenceManagerFactory + Class implementing the JDO persistence + + + hive.metastore.expression.proxy + org.apache.hadoop.hive.ql.optimizer.ppr.PartitionExpressionForMetastore + + + + javax.jdo.option.DetachAllOnCommit + true + Detaches all objects from session so that they can be used after transaction is committed + + + javax.jdo.option.NonTransactionalRead + true + Reads outside of transactions + + + javax.jdo.option.ConnectionUserName + APP + Username to use against metastore database + + + hive.metastore.end.function.listeners + + List of comma separated listeners for the end of metastore functions. + + + hive.metastore.partition.inherit.table.properties + + + List of comma separated keys occurring in table properties which will get inherited to newly created partitions. + * implies all the keys will get inherited. + + + + hive.metadata.export.location + + + When used in conjunction with the org.apache.hadoop.hive.ql.parse.MetaDataExportListener pre event listener, + it is the location to which the metadata will be exported. The default is an empty string, which results in the + metadata being exported to the current user's home directory on HDFS. + + + + hive.metadata.move.exported.metadata.to.trash + true + + When used in conjunction with the org.apache.hadoop.hive.ql.parse.MetaDataExportListener pre event listener, + this setting determines if the metadata that is exported will subsequently be moved to the user's trash directory + alongside the dropped table data. This ensures that the metadata will be cleaned up along with the dropped table data. + + + + hive.cli.errors.ignore + false + + + + hive.cli.print.current.db + false + Whether to include the current database in the Hive prompt. + + + hive.cli.prompt + hive + + Command line prompt configuration value. Other hiveconf can be used in this configuration value. + Variable substitution will only be invoked at the Hive CLI startup.
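As an example of the hiveconf substitution mentioned in the hive.cli.prompt description, the prompt could embed another configuration value; this is only a sketch, and per the description the substitution is applied once at CLI startup.
<property>
  <name>hive.cli.prompt</name>
  <value>hive(${hiveconf:hive.session.id})</value> <!-- illustrative: embeds the session id in the prompt -->
</property>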
+ + + + hive.cli.pretty.output.num.cols + -1 + + The number of columns to use when formatting output generated by the DESCRIBE PRETTY table_name command. + If the value of this property is -1, then Hive will use the auto-detected terminal width. + + + + hive.metastore.fs.handler.class + org.apache.hadoop.hive.metastore.HiveMetaStoreFsImpl + + + + hive.session.id + + + + + hive.session.silent + false + + + + hive.session.history.enabled + false + Whether to log Hive query, query plan, runtime statistics etc. + + + hive.query.string + + Query being executed (there might be multiple per session) + + + hive.query.id + + ID for query being executed (there might be multiple per session) + + + hive.jobname.length + 50 + max jobname length + + + hive.jar.path + + + + + hive.aux.jars.path + + + + + hive.added.files.path + + + + + hive.added.jars.path + + + + + hive.added.archives.path + + + + + hive.auto.progress.timeout + 0s + + Expects a time value with unit (d/day, h/hour, m/min, s/sec, ms/msec, us/usec, ns/nsec), which is sec if not specified. + How long to run autoprogressor for the script/UDTF operators. + Set to 0 for forever. + + + + hive.table.name + + + + + hive.partition.name + + + + + hive.script.auto.progress + false + + Whether Hive Transform/Map/Reduce Clause should automatically send progress information to TaskTracker + to avoid the task getting killed because of inactivity. Hive sends progress information when the script is + outputting to stderr. This option removes the need of periodically producing stderr messages, + but users should be cautious because this may prevent scripts with infinite loops from being killed by TaskTracker. + + + + hive.script.operator.id.env.var + HIVE_SCRIPT_OPERATOR_ID + + Name of the environment variable that holds the unique script operator ID in the user's + transform function (the custom mapper/reducer that the user has specified in the query) + + + + hive.script.operator.truncate.env + false + Truncate each environment variable for external script in scripts operator to 20KB (to fit system limits) + + + hive.mapred.mode + nonstrict + + The mode in which the Hive operations are being performed. + In strict mode, some risky queries are not allowed to run. They include: + Cartesian Product. + No partition being picked up for a query. + Comparing bigints and strings. + Comparing bigints and doubles. + ORDER BY without LIMIT. + + + + hive.alias + + + + + hive.map.aggr + true + Whether to use map-side aggregation in Hive Group By queries + + + hive.groupby.skewindata + false + Whether there is skew in data to optimize group by queries + + + hive.optimize.multigroupby.common.distincts + true + + Whether to optimize a multi-groupby query with the same distinct. + Consider a query like: + + from src + insert overwrite table dest1 select col1, count(distinct colx) group by col1 + insert overwrite table dest2 select col2, count(distinct colx) group by col2; + + With this parameter set to true, first we spray by the distinct value (colx), and then + perform the 2 group bys. This makes sense if map-side aggregation is turned off. However, + with map-side aggregation, it might be useful in some cases to treat the 2 inserts independently, + thereby performing the query above in 2 MR jobs instead of 3 (due to spraying by distinct key first). + If this parameter is turned off, we don't consider the fact that the distinct key is the same across + different MR jobs.
+ + + + hive.join.emit.interval + 1000 + How many rows in the right-most join operand Hive should buffer before emitting the join result. + + + hive.join.cache.size + 25000 + How many rows in the joining tables (except the streaming table) should be cached in memory. + + + hive.cbo.enable + true + Flag to control enabling Cost Based Optimizations using Optiq framework. + + + hive.mapjoin.bucket.cache.size + 100 + + + + hive.mapjoin.optimized.hashtable + true + + Whether Hive should use memory-optimized hash table for MapJoin. Only works on Tez, + because memory-optimized hashtable cannot be serialized. + + + + hive.mapjoin.optimized.keys + true + + Whether MapJoin hashtable should use optimized (size-wise) keys, allowing the table to take less + memory. Depending on key, the memory savings for entire table can be 5-15% or so. + + + + hive.mapjoin.lazy.hashtable + true + + Whether MapJoin hashtable should deserialize values on demand. Depending on how many values in + the table the join will actually touch, it can save a lot of memory by not creating objects for + rows that are not needed. If all rows are needed obviously there's no gain. + + + + hive.mapjoin.optimized.hashtable.wbsize + 10485760 + + Optimized hashtable (see hive.mapjoin.optimized.hashtable) uses a chain of buffers to + store data. This is one buffer size. HT may be slightly faster if this is larger, but for small + joins unnecessary memory will be allocated and then trimmed. + + + + hive.smbjoin.cache.rows + 10000 + How many rows with the same key value should be cached in memory per smb joined table. + + + hive.groupby.mapaggr.checkinterval + 100000 + Number of rows after which the size of the grouping keys/aggregation classes is checked + + + hive.map.aggr.hash.percentmemory + 0.5 + Portion of total memory to be used by map-side group aggregation hash table + + + hive.mapjoin.followby.map.aggr.hash.percentmemory + 0.3 + Portion of total memory to be used by map-side group aggregation hash table, when this group by is followed by map join + + + hive.map.aggr.hash.force.flush.memory.threshold + 0.9 + + The max memory to be used by map-side group aggregation hash table. + If the memory usage is higher than this number, force to flush data + + + + hive.map.aggr.hash.min.reduction + 0.5 + + Hash aggregation will be turned off if the ratio between hash table size and input rows is bigger than this number. + Set to 1 to make sure hash aggregation is never turned off. + + + + hive.multigroupby.singlereducer + true + + Whether to optimize multi group by query to generate single M/R job plan. If the multi group by query has + common group by keys, it will be optimized to generate single M/R job. + + + + hive.map.groupby.sorted + false + + If the bucketing/sorting properties of the table exactly match the grouping key, whether to perform + the group by in the mapper by using BucketizedHiveInputFormat. The only downside to this + is that it limits the number of mappers to the number of files. + + + + hive.map.groupby.sorted.testmode + false + + If the bucketing/sorting properties of the table exactly match the grouping key, whether to perform + the group by in the mapper by using BucketizedHiveInputFormat. If the test mode is set, the plan + is not converted, but a query property is set to denote the same.
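The hive.cbo.enable entry above is the template counterpart of the HIVE_CBO_ENABLED constant added to HiveConf.java at the top of this patch. Since it defaults to true, a site file that wants the pre-CBO behavior would override it; a minimal sketch:
<property>
  <name>hive.cbo.enable</name>
  <value>false</value> <!-- illustrative override: opt out of the Optiq-based cost-based optimizer -->
</property>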
+ + + + hive.groupby.orderby.position.alias + false + Whether to enable using Column Position Alias in Group By or Order By + + + hive.new.job.grouping.set.cardinality + 30 + + Whether a new map-reduce job should be launched for grouping sets/rollups/cubes. + For a query like: select a, b, c, count(1) from T group by a, b, c with rollup; + 4 rows are created per row: (a, b, c), (a, b, null), (a, null, null), (null, null, null). + This can lead to explosion across map-reduce boundary if the cardinality of T is very high, + and map-side aggregation does not do a very good job. + + This parameter decides if Hive should add an additional map-reduce job. If the grouping set + cardinality (4 in the example above) is more than this value, a new MR job is added under the + assumption that the original group by will reduce the data size. + + + + hive.udtf.auto.progress + false + + Whether Hive should automatically send progress information to TaskTracker + when using UDTFs to prevent the task from getting killed because of inactivity. Users should be cautious + because this may prevent TaskTracker from killing tasks with infinite loops. + + + + hive.default.fileformat + TextFile + + Expects one of [textfile, sequencefile, rcfile, orc]. + Default file format for CREATE TABLE statement. Users can explicitly override it by CREATE TABLE ... STORED AS [FORMAT] + + + + hive.query.result.fileformat + TextFile + + Expects one of [textfile, sequencefile, rcfile]. + Default file format for storing result of the query. + + + + hive.fileformat.check + true + Whether to check file format or not when loading data files + + + hive.default.rcfile.serde + org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe + The default SerDe Hive will use for the RCFile format + + + hive.default.serde + org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + The default SerDe Hive will use for storage formats that do not specify a SerDe. + + + hive.serdes.using.metastore.for.schema + org.apache.hadoop.hive.ql.io.orc.OrcSerde,org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe,org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe,org.apache.hadoop.hive.serde2.dynamic_type.DynamicSerDe,org.apache.hadoop.hive.serde2.MetadataTypedColumnsetSerDe,org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe,org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe,org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe + SerDes retrieving schema from metastore. This is an internal parameter. Check with the Hive dev team + + + hive.querylog.location + ${system:java.io.tmpdir}/${system:user.name} + Location of Hive run time structured log file + + + hive.querylog.enable.plan.progress + true + + Whether to log the plan's progress every time a job's progress is checked. + These logs are written to the location specified by hive.querylog.location + + + + hive.querylog.plan.progress.interval + 60000ms + + Expects a time value with unit (d/day, h/hour, m/min, s/sec, ms/msec, us/usec, ns/nsec), which is msec if not specified. + The interval to wait between logging the plan's progress. + If there is a whole number percentage change in the progress of the mappers or the reducers, + the progress is logged regardless of this value. + The actual interval will be the ceiling of (this value divided by the value of + hive.exec.counters.pull.interval) multiplied by the value of hive.exec.counters.pull.interval. + I.e. if it does not divide evenly by the value of hive.exec.counters.pull.interval it will be + logged less frequently than specified.
This only has an effect if hive.querylog.enable.plan.progress is set to true. + + + + hive.script.serde + org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + The default SerDe for transmitting input data to and reading output data from the user scripts. + + + hive.script.recordreader + org.apache.hadoop.hive.ql.exec.TextRecordReader + The default record reader for reading data from the user scripts. + + + hive.script.recordwriter + org.apache.hadoop.hive.ql.exec.TextRecordWriter + The default record writer for writing data to the user scripts. + + + hive.transform.escape.input + false + + This adds an option to escape special chars (newlines, carriage returns and + tabs) when they are passed to the user script. This is useful if the Hive tables + can contain data that contains special characters. + + + + hive.binary.record.max.length + 1000 + + Read from a binary stream and treat each hive.binary.record.max.length bytes as a record. + The last record before the end of stream can have less than hive.binary.record.max.length bytes + + + + hive.hwi.listen.host + 0.0.0.0 + This is the host address the Hive Web Interface will listen on + + + hive.hwi.listen.port + 9999 + This is the port the Hive Web Interface will listen on + + + hive.hwi.war.file + ${env:HWI_WAR_FILE} + This sets the path to the HWI war file, relative to ${HIVE_HOME}. + + + hive.mapred.local.mem + 0 + mapper/reducer memory in local mode + + + hive.mapjoin.smalltable.filesize + 25000000 + + The threshold for the input file size of the small tables; if the file size is smaller + than this threshold, it will try to convert the common join into map join + + + + hive.sample.seednumber + 0 + A number used for percentage sampling. By changing this number, the user will change the subsets of data sampled. + + + hive.test.mode + false + Whether Hive is running in test mode. If yes, it turns on sampling and prefixes the output table name. + + + hive.test.mode.prefix + test_ + In test mode, specifies prefixes for the output table + + + hive.test.mode.samplefreq + 32 + + In test mode, specifies the sampling frequency for a table which is not bucketed. + For example, the following query: + INSERT OVERWRITE TABLE dest SELECT col1 from src + would be converted to + INSERT OVERWRITE TABLE test_dest + SELECT col1 from src TABLESAMPLE (BUCKET 1 out of 32 on rand(1)) + + + + hive.test.mode.nosamplelist + + In test mode, specifies comma separated table names to which sampling would not be applied + + + hive.test.dummystats.aggregator + + internal variable for test + + + hive.test.dummystats.publisher + + internal variable for test + + + hive.merge.mapfiles + true + Merge small files at the end of a map-only job + + + hive.merge.mapredfiles + false + Merge small files at the end of a map-reduce job + + + hive.merge.tezfiles + false + Merge small files at the end of a Tez DAG + + + hive.merge.size.per.task + 256000000 + Size of merged files at the end of the job + + + hive.merge.smallfiles.avgsize + 16000000 + + When the average output file size of a job is less than this number, Hive will start an additional + map-reduce job to merge the output files into bigger files. This is only done for map-only jobs + if hive.merge.mapfiles is true, and for map-reduce jobs if hive.merge.mapredfiles is true.
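Putting the merge settings above together, a site override that also merges small files after map-reduce jobs might look like this; the 64MB threshold is illustrative.
<property>
  <name>hive.merge.mapredfiles</name>
  <value>true</value> <!-- merge after map-reduce jobs too, not only map-only jobs -->
</property>
<property>
  <name>hive.merge.smallfiles.avgsize</name>
  <value>64000000</value> <!-- illustrative threshold; the shipped default is 16000000 -->
</property>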
+ + + + hive.merge.rcfile.block.level + true + + + + hive.merge.input.format.block.level + org.apache.hadoop.hive.ql.io.rcfile.merge.RCFileBlockMergeInputFormat + + + + hive.merge.orcfile.stripe.level + true + + When hive.merge.mapfiles or hive.merge.mapredfiles is enabled while writing a + table with ORC file format, enabling this config will do stripe level fast merge + for small ORC files. Note that enabling this config will not honor padding tolerance + config (hive.exec.orc.block.padding.tolerance). + + + + hive.merge.input.format.stripe.level + org.apache.hadoop.hive.ql.io.orc.OrcFileStripeMergeInputFormat + Input file format to use for ORC stripe level merging (for internal use only) + + + hive.merge.current.job.has.dynamic.partitions + false + + + + hive.exec.rcfile.use.explicit.header + true + + If this is set the header for RCFiles will simply be RCF. If this is not + set the header will be that borrowed from sequence files, e.g. SEQ- followed + by the input and output RCFile formats. + + + + hive.exec.rcfile.use.sync.cache + true + + + + hive.io.rcfile.record.interval + 2147483647 + + + + hive.io.rcfile.column.number.conf + 0 + + + + hive.io.rcfile.tolerate.corruptions + false + + + + hive.io.rcfile.record.buffer.size + 4194304 + + + + hive.exec.orc.memory.pool + 0.5 + Maximum fraction of heap that can be used by ORC file writers + + + hive.exec.orc.write.format + + Define the version of the file to write + + + hive.exec.orc.default.stripe.size + 67108864 + Define the default ORC stripe size + + + hive.exec.orc.default.block.size + 268435456 + Define the default file system block size for ORC files. + + + hive.exec.orc.dictionary.key.size.threshold + 0.8 + + If the number of keys in a dictionary is greater than this fraction of the total number of + non-null rows, turn off dictionary encoding. Use 1 to always use dictionary encoding. + + + + hive.exec.orc.default.row.index.stride + 10000 + Define the default ORC index stride + + + hive.orc.row.index.stride.dictionary.check + true + + If enabled dictionary check will happen after first row index stride (default 10000 rows) + else dictionary check will happen before writing first stripe. In both cases, the decision + to use dictionary or not will be retained thereafter. + + + + hive.exec.orc.default.buffer.size + 262144 + Define the default ORC buffer size + + + hive.exec.orc.default.block.padding + true + Define the default block padding + + + hive.exec.orc.block.padding.tolerance + 0.05 + + Define the tolerance for block padding as a percentage of stripe size. + For the defaults of 64Mb ORC stripe and 256Mb HDFS blocks, a maximum of 3.2Mb will be reserved for padding within the 256Mb block. + In that case, if the available size within the block is more than 3.2Mb, a new smaller stripe will be inserted to fit within that space. + This will make sure that no stripe written will cross block boundaries and cause remote reads within a node local task. + + + + hive.exec.orc.default.compress + ZLIB + Define the default compression codec for ORC file + + + hive.exec.orc.encoding.strategy + SPEED + + Expects one of [speed, compression]. + Define the encoding strategy to use while writing data. Changing this will + only affect the light weight encoding for integers. This flag will not + change the compression level of higher level compression codec (like ZLIB). + + + + hive.orc.splits.include.file.footer + false + + If turned on splits generated by orc will include metadata about the stripes in the file. 
This + data is read remotely (from the client or HS2 machine) and sent to all the tasks. + + + + hive.orc.cache.stripe.details.size + 10000 + Cache size for keeping meta info about orc splits cached in the client. + + + hive.orc.compute.splits.num.threads + 10 + How many threads orc should use to create splits in parallel. + + + hive.exec.orc.skip.corrupt.data + false + + If ORC reader encounters corrupt data, this value will be used to determine + whether to skip the corrupt data or throw exception. The default behavior is to throw exception. + + + + hive.exec.orc.zerocopy + false + Use zerocopy reads with ORC. + + + hive.lazysimple.extended_boolean_literal + false + + LazySimpleSerDe uses this property to determine if it treats 'T', 't', 'F', 'f', + '1', and '0' as extended, legal boolean literals, in addition to 'TRUE' and 'FALSE'. + The default is false, which means only 'TRUE' and 'FALSE' are treated as legal + boolean literals. + + + + hive.optimize.skewjoin + false + + Whether to enable skew join optimization. + The algorithm is as follows: At runtime, detect the keys with a large skew. Instead of + processing those keys, store them temporarily in an HDFS directory. In a follow-up map-reduce + job, process those skewed keys. The same key need not be skewed for all the tables, and so, + the follow-up map-reduce job (for the skewed keys) would be much faster, since it would be a + map-join. + + + + hive.auto.convert.join + true + Whether Hive enables the optimization about converting common join into mapjoin based on the input file size + + + hive.auto.convert.join.noconditionaltask + true + + Whether Hive enables the optimization about converting common join into mapjoin based on the input file size. + If this parameter is on, and the sum of size for n-1 of the tables/partitions for an n-way join is smaller than the + specified size, the join is directly converted to a mapjoin (there is no conditional task). + + + + hive.auto.convert.join.noconditionaltask.size + 10000000 + + If hive.auto.convert.join.noconditionaltask is off, this parameter does not take effect. + However, if it is on, and the sum of size for n-1 of the tables/partitions for an n-way join is smaller than this size, + the join is directly converted to a mapjoin (there is no conditional task). The default is 10MB + + + + hive.auto.convert.join.use.nonstaged + false + + For conditional joins, if input stream from a small alias can be directly applied to join operator without + filtering or projection, the alias need not be pre-staged in distributed cache via mapred local task. + Currently, this is not working with vectorization or tez execution engine. + + + + hive.skewjoin.key + 100000 + + Determine if we get a skew key in join. If we see more than the specified number of rows with the same key in join operator, + we consider the key a skew join key. + + + + hive.skewjoin.mapjoin.map.tasks + 10000 + + Determine the number of map tasks used in the follow-up map join job for a skew join. + It should be used together with hive.skewjoin.mapjoin.min.split to perform a fine-grained control. + + + + hive.skewjoin.mapjoin.min.split + 33554432 + + Determine the maximum number of map tasks used in the follow-up map join job for a skew join by specifying + the minimum split size. It should be used together with hive.skewjoin.mapjoin.map.tasks to perform a fine-grained control.
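A hedged sketch of enabling the runtime skew join handling described above; the key threshold is illustrative.
<property>
  <name>hive.optimize.skewjoin</name>
  <value>true</value> <!-- spill skewed keys to HDFS and process them in a follow-up map join -->
</property>
<property>
  <name>hive.skewjoin.key</name>
  <value>500000</value> <!-- illustrative: treat keys with more than 500000 rows as skewed; shipped default is 100000 -->
</property>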
+ + + + hive.heartbeat.interval + 1000 + Send a heartbeat after this interval - used by mapjoin and filter operators + + + hive.limit.row.max.size + 100000 + When trying a smaller subset of data for simple LIMIT, the minimum size we need to guarantee for each row. + + + hive.limit.optimize.limit.file + 10 + When trying a smaller subset of data for simple LIMIT, maximum number of files we can sample. + + + hive.limit.optimize.enable + false + Whether to enable the optimization of trying a smaller subset of data for simple LIMIT first. + + + hive.limit.optimize.fetch.max + 50000 + + Maximum number of rows allowed for a smaller subset of data for simple LIMIT, if it is a fetch query. + Insert queries are not restricted by this limit. + + + + hive.limit.pushdown.memory.usage + -1.0 + The max memory to be used for hash in RS operator for top K selection. + + + hive.limit.query.max.table.partition + -1 + + This controls how many partitions can be scanned for each partitioned table. + The default value "-1" means no limit. + + + + hive.hashtable.key.count.adjustment + 1.0 + Adjustment to mapjoin hashtable size derived from table and column statistics; the estimate of the number of keys is divided by this value. If the value is 0, statistics are not used and hive.hashtable.initialCapacity is used instead. + + + hive.hashtable.initialCapacity + 100000 + Initial capacity of mapjoin hashtable if statistics are absent, or if hive.hashtable.stats.key.estimate.adjustment is set to 0 + + + hive.hashtable.loadfactor + 0.75 + + + + hive.mapjoin.followby.gby.localtask.max.memory.usage + 0.55 + + This number means how much memory the local task can take to hold the key/value in an in-memory hash table + when this map join is followed by a group by. If the local task's memory usage is more than this number, + the local task will abort by itself. It means the data of the small table is too large to be held in memory. + + + + hive.mapjoin.localtask.max.memory.usage + 0.9 + + This number means how much memory the local task can take to hold the key/value in an in-memory hash table. + If the local task's memory usage is more than this number, the local task will abort by itself. + It means the data of the small table is too large to be held in memory. + + + + hive.mapjoin.check.memory.rows + 100000 + The number of rows processed after which the memory usage is checked + + + hive.debug.localtask + false + + + + hive.input.format + org.apache.hadoop.hive.ql.io.CombineHiveInputFormat + The default input format. Set this to HiveInputFormat if you encounter problems with CombineHiveInputFormat. + + + hive.tez.input.format + org.apache.hadoop.hive.ql.io.HiveInputFormat + The default input format for tez. Tez groups splits in the AM. + + + hive.tez.container.size + -1 + By default Tez will spawn containers of the size of a mapper. This can be used to override it. + + + hive.tez.java.opts + + By default Tez will use the Java options from map tasks. This can be used to override it. + + + hive.tez.log.level + INFO + + The log level to use for tasks executing as part of the DAG. + Used only if hive.tez.java.opts is used to configure Java options. + + + + hive.enforce.bucketing + false + Whether bucketing is enforced. If true, while inserting into the table, bucketing is enforced. + + + hive.enforce.sorting + false + Whether sorting is enforced. If true, while inserting into the table, sorting is enforced.
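For example, to have inserts honor a table's declared bucketing and sorting (a prerequisite for the bucket map join and sort-merge join options that follow), both flags can be turned on:
<property>
  <name>hive.enforce.bucketing</name>
  <value>true</value>
</property>
<property>
  <name>hive.enforce.sorting</name>
  <value>true</value>
</property>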
+ + + hive.optimize.bucketingsorting + true + + If hive.enforce.bucketing or hive.enforce.sorting is true, don't create a reducer for enforcing + bucketing/sorting for queries of the form: + insert overwrite table T2 select * from T1; + where T1 and T2 are bucketed/sorted by the same keys into the same number of buckets. + + + + hive.mapred.partitioner + org.apache.hadoop.hive.ql.io.DefaultHivePartitioner + + + + hive.enforce.sortmergebucketmapjoin + false + If the user asked for sort-merge bucketed map-side join, and it cannot be performed, should the query fail or not? + + + hive.enforce.bucketmapjoin + false + + If the user asked for bucketed map-side join, and it cannot be performed, + should the query fail or not? For example, if the buckets in the tables being joined are + not a multiple of each other, bucketed map-side join cannot be performed, and the + query will fail if hive.enforce.bucketmapjoin is set to true. + + + + hive.auto.convert.sortmerge.join + false + Whether the join will be automatically converted to a sort-merge join, if the joined tables pass the criteria for sort-merge join. + + + hive.auto.convert.sortmerge.join.bigtable.selection.policy + org.apache.hadoop.hive.ql.optimizer.AvgPartitionSizeBasedBigTableSelectorForAutoSMJ + + The policy to choose the big table for automatic conversion to sort-merge join. + By default, the table with the largest partitions is chosen as the big table. All policies are: + . based on position of the table - the leftmost table is selected + org.apache.hadoop.hive.ql.optimizer.LeftmostBigTableSMJ. + . based on total size (all the partitions selected in the query) of the table + org.apache.hadoop.hive.ql.optimizer.TableSizeBasedBigTableSelectorForAutoSMJ. + . based on average size (all the partitions selected in the query) of the table + org.apache.hadoop.hive.ql.optimizer.AvgPartitionSizeBasedBigTableSelectorForAutoSMJ. + New policies can be added in the future. + + + + hive.auto.convert.sortmerge.join.to.mapjoin + false + + If hive.auto.convert.sortmerge.join is set to true, and a join was converted to a sort-merge join, + this parameter decides whether each table should be tried as a big table, and effectively a map-join should be + tried. That would create a conditional task with n+1 children for an n-way join (1 child for each table as the + big table), and the backup task will be the sort-merge join. In some cases, a map-join would be faster than a + sort-merge join, if there is no advantage of having the output bucketed and sorted. For example, if a very big sorted + and bucketed table with few files (say 10 files) is being joined with a very small sorted and bucketed table + with few files (10 files), the sort-merge join will only use 10 mappers, and a simple map-only join might be faster + if the complete small table can fit in memory, and a map-join can be performed. + + + + hive.exec.script.trust + false + + + + hive.exec.rowoffset + false + Whether to provide the row offset virtual column + + + hive.hadoop.supports.splittable.combineinputformat + false + + + + hive.optimize.index.filter + false + Whether to enable automatic use of indexes + + + hive.optimize.index.autoupdate + false + Whether to update stale indexes automatically + + + hive.optimize.ppd + true + Whether to enable predicate pushdown + + + hive.ppd.recognizetransivity + true + Whether to transitively replicate predicate filters over equijoin conditions. + + + hive.ppd.remove.duplicatefilters + true + Whether to push predicates down into storage handlers.
Ignored when hive.optimize.ppd is false. + + + hive.optimize.constant.propagation + true + Whether to enable constant propagation optimizer + + + hive.optimize.metadataonly + true + + + + hive.optimize.null.scan + true + Don't scan relations which are guaranteed not to generate any rows + + + hive.optimize.ppd.storage + true + Whether to push predicates down to storage handlers + + + hive.optimize.groupby + true + Whether to enable the bucketed group by from bucketed partitions/tables. + + + hive.optimize.bucketmapjoin + false + Whether to try bucket mapjoin + + + hive.optimize.bucketmapjoin.sortedmerge + false + Whether to try sorted bucket merge map join + + + hive.optimize.reducededuplication + true + + Remove extra map-reduce jobs if the data is already clustered by the same key which needs to be used again. + This should always be set to true. Since it is a new feature, it has been made configurable. + + + + hive.optimize.reducededuplication.min.reducer + 4 + + Reduce deduplication merges two RSs by moving key/parts/reducer-num of the child RS to parent RS. + That means if reducer-num of the child RS is fixed (order by or forced bucketing) and small, it can result in a very slow, single MR job. + The optimization will be automatically disabled if the number of reducers would be less than the specified value. + + + + hive.optimize.sort.dynamic.partition + true + + When enabled, the dynamic partitioning column will be globally sorted. + This way we can keep only one record writer open for each partition value + in the reducer, thereby reducing the memory pressure on reducers. + + + + hive.optimize.sampling.orderby + false + Uses sampling on order-by clause for parallel execution. + + + hive.optimize.sampling.orderby.number + 1000 + Total number of samples to be obtained. + + + hive.optimize.sampling.orderby.percent + 0.1 + + Expects value between 0.0f and 1.0f. + Probability with which a row will be chosen. + + + + hive.optimize.union.remove + false + + Whether to remove the union and push the operators between union and the filesink above union. + This avoids an extra scan of the output by union. This is independently useful for union + queries, and especially useful when hive.optimize.skewjoin.compiletime is set to true, since an + extra union is inserted. + + The merge is triggered if either of hive.merge.mapfiles or hive.merge.mapredfiles is set to true. + If the user has set hive.merge.mapfiles to true and hive.merge.mapredfiles to false, the idea was that the + number of reducers is small, so the number of files is anyway small. However, with this optimization, + we are increasing the number of files possibly by a big margin. So, we merge aggressively. + + + + hive.optimize.correlation + false + Exploit intra-query correlations. + + + hive.mapred.supports.subdirectories + false + + Whether the version of Hadoop which is running supports sub-directories for tables/partitions. + Many Hive optimizations can be applied if the Hadoop version supports sub-directories for + tables/partitions. It was added by MAPREDUCE-1501 + + + + hive.optimize.skewjoin.compiletime + false + + Whether to create a separate plan for skewed keys for the tables in the join. + This is based on the skewed keys stored in the metadata. At compile time, the plan is broken + into different joins: one for the skewed keys, and the other for the remaining keys. And then, + a union is performed for the 2 joins generated above.
So unless the same skewed key is present + in both the joined tables, the join for the skewed key will be performed as a map-side join. + + The main difference between this parameter and hive.optimize.skewjoin is that this parameter + uses the skew information stored in the metastore to optimize the plan at compile time itself. + If there is no skew information in the metadata, this parameter will not have any effect. + Both hive.optimize.skewjoin.compiletime and hive.optimize.skewjoin should be set to true. + Ideally, hive.optimize.skewjoin should be renamed as hive.optimize.skewjoin.runtime, but not doing + so for backward compatibility. + + If the skew information is correctly stored in the metadata, hive.optimize.skewjoin.compiletime + would change the query plan to take care of it, and hive.optimize.skewjoin will be a no-op. + + + + hive.optimize.index.filter.compact.minsize + 5368709120 + Minimum size (in bytes) of the inputs on which a compact index is automatically used. + + + hive.optimize.index.filter.compact.maxsize + -1 + Maximum size (in bytes) of the inputs on which a compact index is automatically used. A negative number is equivalent to infinity. + + + hive.index.compact.query.max.entries + 10000000 + The maximum number of index entries to read during a query that uses the compact index. Negative value is equivalent to infinity. + + + hive.index.compact.query.max.size + 10737418240 + The maximum number of bytes that a query using the compact index can read. Negative value is equivalent to infinity. + + + hive.index.compact.binary.search + true + Whether or not to use a binary search to find the entries in an index table that match the filter, where possible + + + hive.stats.autogather + true + A flag to gather statistics automatically during the INSERT OVERWRITE command. + + + hive.stats.dbclass + fs + + Expects one of the patterns in [jdbc(:.*), hbase, counter, custom, fs]. + The storage that stores temporary Hive statistics. Currently, jdbc, hbase, counter and custom type are supported. + + + + hive.stats.jdbcdriver + org.apache.derby.jdbc.EmbeddedDriver + The JDBC driver for the database that stores temporary Hive statistics. + + + hive.stats.dbconnectionstring + jdbc:derby:;databaseName=TempStatsStore;create=true + The default connection string for the database that stores temporary Hive statistics. + + + hive.stats.default.publisher + + The Java class (implementing the StatsPublisher interface) that is used by default if hive.stats.dbclass is custom type. + + + hive.stats.default.aggregator + + The Java class (implementing the StatsAggregator interface) that is used by default if hive.stats.dbclass is custom type. + + + hive.stats.jdbc.timeout + 30s + + Expects a time value with unit (d/day, h/hour, m/min, s/sec, ms/msec, us/usec, ns/nsec), which is sec if not specified. + Timeout value used by JDBC connection and statements. + + + + hive.stats.atomic + false + Whether to update metastore stats only if all stats are available + + + hive.stats.retries.max + 0 + + Maximum number of retries when stats publisher/aggregator gets an exception updating the intermediate database. + The default is no retries on failure. + + + + hive.stats.retries.wait + 3000ms + + Expects a time value with unit (d/day, h/hour, m/min, s/sec, ms/msec, us/usec, ns/nsec), which is msec if not specified. + The base waiting window before the next retry. The actual wait time is calculated by baseWindow * failures + baseWindow * (failures + 1) * (random number between [0.0,1.0]).
+ + + + hive.stats.collect.rawdatasize + true + should the raw data size be collected when analyzing tables + + + hive.client.stats.counters + + + Subset of counters that should be of interest for hive.client.stats.publishers (when one wants to limit their publishing). + Non-display names should be used + + + + hive.stats.reliable + false + + Whether queries will fail because stats cannot be collected completely accurately. + If this is set to true, reading/writing from/into a partition may fail because the stats + could not be computed accurately. + + + + hive.analyze.stmt.collect.partlevel.stats + true + Queries like 'analyze table T compute statistics for columns' should compute partition-level stats for a partitioned table even when no partition spec is specified. + + + hive.stats.gather.num.threads + 10 + + Number of threads used by partialscan/noscan analyze command for partitioned tables. + This is applicable only for file formats that implement StatsProvidingRecordReader (like ORC). + + + + hive.stats.collect.tablekeys + false + + Whether join and group by keys on tables are derived and maintained in the QueryPlan. + This is useful to identify how tables are accessed and to determine if they should be bucketed. + + + + hive.stats.collect.scancols + false + + Whether column accesses are tracked in the QueryPlan. + This is useful to identify how tables are accessed and to determine if there are wasted columns that can be trimmed. + + + + hive.stats.ndv.error + 20.0 + + Standard error expressed in percentage. Provides a tradeoff between accuracy and compute cost. + A lower value for error indicates higher accuracy and a higher compute cost. + + + + hive.stats.key.prefix.max.length + 150 + + Determines if when the prefix of the key used for intermediate stats collection + exceeds a certain length, a hash of the key is used instead. If the value < 0 then hashing is never used. + + + + hive.stats.key.prefix.reserve.length + 24 + + Reserved length for postfix of stats key. Currently only meaningful for counter type which should + keep length of full stats key smaller than max length configured by hive.stats.key.prefix.max.length. + For counter type, it should be bigger than the length of LB spec if exists. + + + + hive.stats.max.variable.length + 100 + + To estimate the size of data flowing through operators in Hive/Tez (for reducer estimation etc.), + average row size is multiplied with the total number of rows coming out of each operator. + Average row size is computed from average column size of all columns in the row. In the absence + of column statistics, for variable length columns (like string, bytes etc.), this value will be + used. For fixed length columns their corresponding Java equivalent sizes are used + (float - 4 bytes, double - 8 bytes etc.). + + + + hive.stats.list.num.entries + 10 + + To estimate the size of data flowing through operators in Hive/Tez (for reducer estimation etc.), + average row size is multiplied with the total number of rows coming out of each operator. + Average row size is computed from average column size of all columns in the row. In the absence + of column statistics and for variable length complex columns like list, the average number of + entries/values can be specified using this config. + + + + hive.stats.map.num.entries + 10 + + To estimate the size of data flowing through operators in Hive/Tez (for reducer estimation etc.), + average row size is multiplied with the total number of rows coming out of each operator.
+ Average row size is computed from average column size of all columns in the row. In the absence + of column statistics and for variable length complex columns like map, the average number of + entries/values can be specified using this config. + + + + hive.stats.map.parallelism + 1 + + Hive/Tez optimizer estimates the data size flowing through each of the operators. + For GROUPBY operator, to accurately compute the data size map-side parallelism needs to + be known. By default, this value is set to 1 since optimizer is not aware of the number of + mappers during compile-time. This Hive config can be used to specify the number of mappers + to be used for data size computation of GROUPBY operator. + + + + hive.stats.fetch.partition.stats + true + + Annotation of operator tree with statistics information requires partition level basic + statistics like number of rows, data size and file size. Partition statistics are fetched from + metastore. Fetching partition statistics for each needed partition can be expensive when the + number of partitions is high. This flag can be used to disable fetching of partition statistics + from metastore. When this flag is disabled, Hive will make calls to filesystem to get file sizes + and will estimate the number of rows from row schema. + + + + hive.stats.fetch.column.stats + false + + Annotation of operator tree with statistics information requires column statistics. + Column statistics are fetched from metastore. Fetching column statistics for each needed column + can be expensive when the number of columns is high. This flag can be used to disable fetching + of column statistics from metastore. + + + + hive.stats.join.factor + 1.1 + + Hive/Tez optimizer estimates the data size flowing through each of the operators. JOIN operator + uses column statistics to estimate the number of rows flowing out of it and hence the data size. + In the absence of column statistics, this factor determines the amount of rows that flows out + of JOIN operator. + + + + hive.stats.deserialization.factor + 1.0 + + Hive/Tez optimizer estimates the data size flowing through each of the operators. In the absence + of basic statistics like number of rows and data size, file size is used to estimate the number + of rows and data size. Since files in tables/partitions are serialized (and optionally + compressed) the estimates of number of rows and data size cannot be reliably determined. + This factor is multiplied with the file size to account for serialization and compression. + + + + hive.support.concurrency + false + + Whether Hive supports concurrency control or not. + A ZooKeeper instance must be up and running when using zookeeper Hive lock manager + + + + hive.lock.manager + org.apache.hadoop.hive.ql.lockmgr.zookeeper.ZooKeeperHiveLockManager + + + + hive.lock.numretries + 100 + The number of times you want to try to get all the locks + + + hive.unlock.numretries + 10 + The number of times you want to retry to do one unlock + + + hive.lock.sleep.between.retries + 60s + + Expects a time value with unit (d/day, h/hour, m/min, s/sec, ms/msec, us/usec, ns/nsec), which is sec if not specified. + The sleep time between various retries + + + + hive.lock.mapred.only.operation + false + + This param is to control whether or not only do lock on queries + that need to execute at least one mapred job. + + + + hive.zookeeper.quorum + + The list of ZooKeeper servers to talk to. This is only needed for read/write locks. 
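As a worked example of the locking settings above (and the ZooKeeper client port that follows), the snippet below shows one plausible way to wire them up from a client through HiveConf; the host names are placeholders and this is only a sketch, not a recommended production setup:

import org.apache.hadoop.hive.conf.HiveConf;

// Hypothetical client-side configuration for ZooKeeper-based locking.
public class LockConfigSketch {
  public static void main(String[] args) {
    HiveConf conf = new HiveConf();
    // Per the note above, a ZooKeeper ensemble must be up for this lock manager.
    conf.set("hive.support.concurrency", "true");
    conf.set("hive.lock.manager",
        "org.apache.hadoop.hive.ql.lockmgr.zookeeper.ZooKeeperHiveLockManager");
    conf.set("hive.zookeeper.quorum", "zk1.example.com,zk2.example.com,zk3.example.com");
    conf.set("hive.lock.numretries", "100");
    conf.set("hive.lock.sleep.between.retries", "60s");
    System.out.println("lock manager: " + conf.get("hive.lock.manager"));
  }
}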
+ + + hive.zookeeper.client.port + 2181 + The port of ZooKeeper servers to talk to. This is only needed for read/write locks. + + + hive.zookeeper.session.timeout + 600000 + + ZooKeeper client's session timeout. The client is disconnected, and as a result, all locks released, + if a heartbeat is not sent in the timeout. + + + + hive.zookeeper.namespace + hive_zookeeper_namespace + The parent node under which all ZooKeeper nodes are created. + + + hive.zookeeper.clean.extra.nodes + false + Clean extra nodes at the end of the session. + + + hive.txn.manager + org.apache.hadoop.hive.ql.lockmgr.DummyTxnManager + + + + hive.txn.timeout + 300s + + Expects a time value with unit (d/day, h/hour, m/min, s/sec, ms/msec, us/usec, ns/nsec), which is sec if not specified. + time after which transactions are declared aborted if the client has not sent a heartbeat. + + + + hive.txn.max.open.batch + 1000 + + Maximum number of transactions that can be fetched in one call to open_txns(). + Increasing this will decrease the number of delta files created when + streaming data into Hive. But it will also increase the number of + open transactions at any given time, possibly impacting read performance. + + + + hive.compactor.initiator.on + false + Whether to run the compactor's initiator thread in this metastore instance or not. + + + hive.compactor.worker.threads + 0 + Number of compactor worker threads to run on this metastore instance. + + + hive.compactor.worker.timeout + 86400s + + Expects a time value with unit (d/day, h/hour, m/min, s/sec, ms/msec, us/usec, ns/nsec), which is sec if not specified. + Time before a given compaction in working state is declared a failure + and returned to the initiated state. + + + + hive.compactor.check.interval + 300s + + Expects a time value with unit (d/day, h/hour, m/min, s/sec, ms/msec, us/usec, ns/nsec), which is sec if not specified. + Time between checks to see if any partitions need compacted. + This should be kept high because each check for compaction requires many calls against the NameNode. + + + + hive.compactor.delta.num.threshold + 10 + + Number of delta files that must exist in a directory before the compactor will attempt + a minor compaction. + + + + hive.compactor.delta.pct.threshold + 0.1 + Percentage (by size) of base that deltas can be before major compaction is initiated. + + + hive.compactor.abortedtxn.threshold + 1000 + + Number of aborted transactions involving a particular table or partition before major + compaction is initiated. + + + + hive.hbase.wal.enabled + true + + Whether writes to HBase should be forced to the write-ahead log. + Disabling this improves HBase write performance at the risk of lost writes in case of a crash. + + + + hive.hbase.generatehfiles + false + True when HBaseStorageHandler should generate hfiles instead of operate against the online table. + + + hive.hbase.snapshot.name + + The HBase table snapshot name to use. + + + hive.hbase.snapshot.restoredir + /tmp + The directory in which to restore the HBase table snapshot. + + + hive.archive.enabled + false + Whether archiving operations are permitted + + + hive.optimize.index.groupby + false + Whether to enable optimization of group-by queries using Aggregate indexes. + + + hive.outerjoin.supports.filters + true + + + + hive.fetch.task.conversion + more + + Expects one of [minimal, more]. + Some select queries can be converted to single FETCH task minimizing latency. 
+ Currently the query should be single sourced not having any subquery and should not have + any aggregations or distincts (which incurs RS), lateral views and joins. + 1. minimal : SELECT STAR, FILTER on partition columns, LIMIT only + 2. more : SELECT, FILTER, LIMIT only (support TABLESAMPLE and virtual columns) + + + + hive.fetch.task.conversion.threshold + 1073741824 + + Input threshold for applying hive.fetch.task.conversion. If target table is native, input length + is calculated by summation of file lengths. If it's not native, storage handler for the table + can optionally implement org.apache.hadoop.hive.ql.metadata.InputEstimator interface. + + + + hive.fetch.task.aggr + false + + Aggregation queries with no group-by clause (for example, select count(*) from src) execute + final aggregations in single reduce task. If this is set true, Hive delegates final aggregation + stage to fetch task, possibly decreasing the query time. + + + + hive.compute.query.using.stats + false + + When set to true Hive will answer a few queries like count(1) purely using stats + stored in metastore. For basic stats collection turn on the config hive.stats.autogather to true. + For more advanced stats collection need to run analyze table queries. + + + + hive.fetch.output.serde + org.apache.hadoop.hive.serde2.DelimitedJSONSerDe + The SerDe used by FetchTask to serialize the fetch output. + + + hive.cache.expr.evaluation + true + If true, evaluation result of deterministic expression referenced twice or more will be cached. + + + hive.variable.substitute + true + This enables substitution using syntax like ${var} ${system:var} and ${env:var}. + + + hive.variable.substitute.depth + 40 + The maximum replacements the substitution engine will do. + + + hive.conf.validation + true + Enables type checking for registered Hive configurations + + + hive.semantic.analyzer.hook + + + + + hive.security.authorization.enabled + false + enable or disable the Hive client authorization + + + hive.security.authorization.manager + org.apache.hadoop.hive.ql.security.authorization.DefaultHiveAuthorizationProvider + + The Hive client authorization manager class name. The user defined authorization class should implement + interface org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider. + + + + hive.security.authenticator.manager + org.apache.hadoop.hive.ql.security.HadoopDefaultAuthenticator + + hive client authenticator manager class name. The user defined authenticator should implement + interface org.apache.hadoop.hive.ql.security.HiveAuthenticationProvider. + + + + hive.security.metastore.authorization.manager + org.apache.hadoop.hive.ql.security.authorization.DefaultHiveMetastoreAuthorizationProvider + + authorization manager class name to be used in the metastore for authorization. + The user defined authorization class should implement interface + org.apache.hadoop.hive.ql.security.authorization.HiveMetastoreAuthorizationProvider. + + + + hive.security.metastore.authenticator.manager + org.apache.hadoop.hive.ql.security.HadoopDefaultMetastoreAuthenticator + + authenticator manager class name to be used in the metastore for authentication. + The user defined authenticator should implement interface org.apache.hadoop.hive.ql.security.HiveAuthenticationProvider. + + + + hive.security.authorization.createtable.user.grants + + + the privileges automatically granted to some users whenever a table gets created. 
+ An example like "userX,userY:select;userZ:create" will grant select privilege to userX and userY, + and grant create privilege to userZ whenever a new table created. + + + + hive.security.authorization.createtable.group.grants + + + the privileges automatically granted to some groups whenever a table gets created. + An example like "groupX,groupY:select;groupZ:create" will grant select privilege to groupX and groupY, + and grant create privilege to groupZ whenever a new table created. + + + + hive.security.authorization.createtable.role.grants + + + the privileges automatically granted to some roles whenever a table gets created. + An example like "roleX,roleY:select;roleZ:create" will grant select privilege to roleX and roleY, + and grant create privilege to roleZ whenever a new table created. + + + + hive.security.authorization.createtable.owner.grants + + + the privileges automatically granted to the owner whenever a table gets created. + An example like "select,drop" will grant select and drop privilege to the owner of the table + + + + hive.security.authorization.sqlstd.confwhitelist + + interal variable. List of modifiable configurations by user. + + + hive.cli.print.header + false + Whether to print the names of the columns in query output. + + + hive.error.on.empty.partition + false + Whether to throw an exception if dynamic partition insert generates empty results. + + + hive.index.compact.file + + internal variable + + + hive.index.blockfilter.file + + internal variable + + + hive.index.compact.file.ignore.hdfs + false + + When true the HDFS location stored in the index file will be ignored at runtime. + If the data got moved or the name of the cluster got changed, the index data should still be usable. + + + + hive.exim.uri.scheme.whitelist + hdfs,pfile + A comma separated list of acceptable URI schemes for import and export. + + + hive.mapper.cannot.span.multiple.partitions + false + + + + hive.rework.mapredwork + false + + should rework the mapred work or not. + This is first introduced by SymlinkTextInputFormat to replace symlink files with real paths at compile time. + + + + hive.exec.concatenate.check.index + true + + If this is set to true, Hive will throw error when doing + 'alter table tbl_name [partSpec] concatenate' on a table/partition + that has indexes on it. The reason the user want to set this to true + is because it can help user to avoid handling all index drop, recreation, + rebuild work. This is very helpful for tables with thousands of partitions. + + + + hive.io.exception.handlers + + + A list of io exception handler class names. This is used + to construct a list exception handlers to handle exceptions thrown + by record readers + + + + hive.server2.logging.operation.enabled + true + When true, HS2 will save operation logs + + + hive.server2.logging.operation.log.location + ${system:java.io.tmpdir}/${system:user.name}/operation_logs + Top level directory where operation logs are stored if logging functionality is enabled + + + hive.log4j.file + + + Hive log4j configuration file. + If the property is not set, then logging will be initialized using hive-log4j.properties found on the classpath. + If the property is set, the value must be a valid URI (java.net.URI, e.g. "file:///tmp/my-logging.properties"), + which you can then extract a URL from and pass to PropertyConfigurator.configure(URL). + + + + hive.exec.log4j.file + + + Hive log4j configuration file for execution mode(sub command). 
+ If the property is not set, then logging will be initialized using hive-exec-log4j.properties found on the classpath. + If the property is set, the value must be a valid URI (java.net.URI, e.g. "file:///tmp/my-logging.properties"), + which you can then extract a URL from and pass to PropertyConfigurator.configure(URL). + + + + hive.server2.global.init.file.location + ${env:HIVE_CONF_DIR} + + The location of HS2 global init file (.hiverc). + If the property is reset, the value must be a valid path where the init file is located. + + + + hive.autogen.columnalias.prefix.label + _c + + String used as a prefix when auto generating column alias. + By default the prefix label will be appended with a column position number to form the column alias. + Auto generation would happen if an aggregate function is used in a select clause without an explicit alias. + + + + hive.autogen.columnalias.prefix.includefuncname + false + Whether to include function name in the column alias auto generated by Hive. + + + hive.exec.perf.logger + org.apache.hadoop.hive.ql.log.PerfLogger + + The class responsible for logging client side performance metrics. + Must be a subclass of org.apache.hadoop.hive.ql.log.PerfLogger + + + + hive.start.cleanup.scratchdir + false + To cleanup the Hive scratchdir when starting the Hive Server + + + hive.insert.into.multilevel.dirs + false + + Where to insert into multilevel directories like + "insert directory '/HIVEFT25686/chinna/' from table" + + + + hive.warehouse.subdir.inherit.perms + false + + Set this to true if the the table directories should inherit the + permission of the warehouse or database directory instead of being created + with the permissions derived from dfs umask + + + + hive.insert.into.external.tables + true + whether insert into external tables is allowed + + + hive.exec.driver.run.hooks + + A comma separated list of hooks which implement HiveDriverRunHook. Will be run at the beginning and end of Driver.run, these will be run in the order specified. + + + hive.ddl.output.format + + + The data format to use for DDL output. One of "text" (for human + readable text) or "json" (for a json object). + + + + hive.entity.separator + @ + Separator used to construct names of tables and partitions. For example, dbname@tablename@partitionname + + + hive.display.partition.cols.separately + true + + In older Hive version (0.10 and earlier) no distinction was made between + partition columns or non-partition columns while displaying columns in describe + table. From 0.12 onwards, they are displayed separately. This flag will let you + get old behavior, if desired. See, test-case in patch for HIVE-6689. + + + + hive.server2.max.start.attempts + 30 + + Expects value bigger than 0. + This number of times HiveServer2 will attempt to start before exiting, sleeping 60 seconds between retries. + The default of 30 will keep trying for 30 minutes. + + + + hive.server2.transport.mode + binary + + Expects one of [binary, http]. + Transport mode of HiveServer2. + + + + hive.server2.thrift.http.port + 10001 + Port number when in HTTP mode. + + + hive.server2.thrift.http.path + cliservice + Path component of URL endpoint when in HTTP mode. + + + hive.server2.thrift.http.min.worker.threads + 5 + Minimum number of worker threads when in HTTP mode. + + + hive.server2.thrift.http.max.worker.threads + 500 + Maximum number of worker threads when in HTTP mode. 
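The HTTP transport settings above pair with a matching client connection string. The sketch below shows a plausible JDBC client for HTTP mode; the host name is a placeholder, and the exact URL parameter names for HTTP transport have varied across Hive versions, so treat this as illustrative only:

import java.sql.Connection;
import java.sql.DriverManager;

// Hypothetical JDBC client for HiveServer2 in HTTP mode (port 10001, path cliservice).
// Requires the Hive JDBC driver on the classpath.
public class Hs2HttpClientSketch {
  public static void main(String[] args) throws Exception {
    String url = "jdbc:hive2://hs2.example.com:10001/default"
        + ";transportMode=http;httpPath=cliservice";
    try (Connection conn = DriverManager.getConnection(url, "user", "")) {
      System.out.println("connected: " + !conn.isClosed());
    }
  }
}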
+ + + hive.server2.thrift.http.max.idle.time + 1800s + + Expects a time value with unit (d/day, h/hour, m/min, s/sec, ms/msec, us/usec, ns/nsec), which is msec if not specified. + Maximum idle time for a connection on the server when in HTTP mode. + + + + hive.server2.thrift.http.worker.keepalive.time + 60s + + Expects a time value with unit (d/day, h/hour, m/min, s/sec, ms/msec, us/usec, ns/nsec), which is sec if not specified. + Keepalive time for an idle http worker thread. When the number of workers exceeds min workers, excessive threads are killed after this time interval. + + + + hive.server2.thrift.port + 10000 + + Port number of HiveServer2 Thrift interface. + Can be overridden by setting $HIVE_SERVER2_THRIFT_PORT + + + + hive.server2.thrift.bind.host + + + Bind host on which to run the HiveServer2 Thrift interface. + Can be overridden by setting $HIVE_SERVER2_THRIFT_BIND_HOST + + + + hive.server2.thrift.sasl.qop + auth + + Expects one of [auth, auth-int, auth-conf]. + Sasl QOP value; Set it to one of following values to enable higher levels of + protection for HiveServer2 communication with clients. + "auth" - authentication only (default) + "auth-int" - authentication plus integrity protection + "auth-conf" - authentication plus integrity and confidentiality protection + This is applicable only if HiveServer2 is configured to use Kerberos authentication. + + + + hive.server2.thrift.min.worker.threads + 5 + Minimum number of Thrift worker threads + + + hive.server2.thrift.max.worker.threads + 500 + Maximum number of Thrift worker threads + + + hive.server2.thrift.worker.keepalive.time + 60s + + Expects a time value with unit (d/day, h/hour, m/min, s/sec, ms/msec, us/usec, ns/nsec), which is sec if not specified. + Keepalive time (in seconds) for an idle worker thread. When the number of workers exceeds min workers, excessive threads are killed after this time interval. + + + + hive.server2.async.exec.threads + 100 + Number of threads in the async thread pool for HiveServer2 + + + hive.server2.async.exec.shutdown.timeout + 10s + + Expects a time value with unit (d/day, h/hour, m/min, s/sec, ms/msec, us/usec, ns/nsec), which is sec if not specified. + Maximum time for which HiveServer2 shutdown will wait for async + + + + hive.server2.async.exec.wait.queue.size + 100 + + Size of the wait queue for async thread pool in HiveServer2. + After hitting this limit, the async thread pool will reject new requests. + + + + hive.server2.async.exec.keepalive.time + 10s + + Expects a time value with unit (d/day, h/hour, m/min, s/sec, ms/msec, us/usec, ns/nsec), which is sec if not specified. + Time that an idle HiveServer2 async thread (from the thread pool) will wait for a new task + to arrive before terminating + + + + hive.server2.long.polling.timeout + 5000ms + + Expects a time value with unit (d/day, h/hour, m/min, s/sec, ms/msec, us/usec, ns/nsec), which is msec if not specified. + Time that HiveServer2 will wait before responding to asynchronous calls that use long polling + + + + hive.server2.authentication + NONE + + Expects one of [nosasl, none, ldap, kerberos, pam, custom]. + Client authentication types. + NONE: no authentication check + LDAP: LDAP/AD based authentication + KERBEROS: Kerberos/GSSAPI authentication + CUSTOM: Custom authentication provider + (Use with property hive.server2.custom.authentication.class) + + + + hive.server2.allow.user.substitution + true + Allow alternate user to be specified as part of HiveServer2 open connection request. 
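The async-execution settings above describe a bounded worker pool with a bounded wait queue. The following standalone analogue (not HiveServer2's actual code) illustrates the semantics: 100 workers, a wait queue of 100, a 10s keepalive, and rejection once the queue is full:

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

// Analogue of the hive.server2.async.exec.* settings using a plain ThreadPoolExecutor.
public class AsyncPoolSketch {
  public static void main(String[] args) {
    ThreadPoolExecutor pool = new ThreadPoolExecutor(
        100, 100,                               // hive.server2.async.exec.threads
        10, TimeUnit.SECONDS,                   // ...async.exec.keepalive.time
        new LinkedBlockingQueue<Runnable>(100), // ...async.exec.wait.queue.size
        new ThreadPoolExecutor.AbortPolicy());  // reject new requests when the queue is full
    pool.allowCoreThreadTimeOut(true);          // let idle workers expire after keepalive
    pool.submit(new Runnable() {
      @Override
      public void run() {
        System.out.println("operation runs asynchronously");
      }
    });
    pool.shutdown();
  }
}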
+ + + hive.server2.authentication.kerberos.keytab + + Kerberos keytab file for server principal + + + hive.server2.authentication.kerberos.principal + + Kerberos server principal + + + hive.server2.authentication.spnego.keytab + + + keytab file for SPNego principal, optional, + typical value would look like /etc/security/keytabs/spnego.service.keytab, + This keytab would be used by HiveServer2 when Kerberos security is enabled and + HTTP transport mode is used. + This needs to be set only if SPNEGO is to be used in authentication. + SPNego authentication would be honored only if valid + hive.server2.authentication.spnego.principal + and + hive.server2.authentication.spnego.keytab + are specified. + + + + hive.server2.authentication.spnego.principal + + + SPNego service principal, optional, + typical value would look like HTTP/_HOST@EXAMPLE.COM + SPNego service principal would be used by HiveServer2 when Kerberos security is enabled + and HTTP transport mode is used. + This needs to be set only if SPNEGO is to be used in authentication. + + + + hive.server2.authentication.ldap.url + + LDAP connection URL + + + hive.server2.authentication.ldap.baseDN + + LDAP base DN + + + hive.server2.authentication.ldap.Domain + + + + + hive.server2.custom.authentication.class + + + Custom authentication class. Used when property + 'hive.server2.authentication' is set to 'CUSTOM'. Provided class + must be a proper implementation of the interface + org.apache.hive.service.auth.PasswdAuthenticationProvider. HiveServer2 + will call its Authenticate(user, passed) method to authenticate requests. + The implementation may optionally implement Hadoop's + org.apache.hadoop.conf.Configurable class to grab Hive's Configuration object. + + + + hive.server2.authentication.pam.services + + + List of the underlying pam services that should be used when auth type is PAM + A file with the same name must exist in /etc/pam.d + + + + hive.server2.enable.doAs + true + + Setting this property to true will have HiveServer2 execute + Hive operations as the user making the calls to it. + + + + hive.server2.table.type.mapping + CLASSIC + + Expects one of [classic, hive]. + This setting reflects how HiveServer2 will report the table types for JDBC and other + client implementations that retrieve the available tables and supported table types + HIVE : Exposes Hive's native table types like MANAGED_TABLE, EXTERNAL_TABLE, VIRTUAL_VIEW + CLASSIC : More generic types like TABLE and VIEW + + + + hive.server2.session.hook + + + + + hive.server2.use.SSL + false + + + + hive.server2.keystore.path + + + + + hive.server2.keystore.password + + + + + hive.security.command.whitelist + set,reset,dfs,add,list,delete,compile + Comma separated list of non-SQL Hive commands users are authorized to execute + + + hive.server2.session.check.interval + 0ms + + Expects a time value with unit (d/day, h/hour, m/min, s/sec, ms/msec, us/usec, ns/nsec), which is msec if not specified. + The time should be bigger than or equal to 3000 msec. + The check interval for session/operation timeout, which can be disabled by setting to zero or negative value. + + + + hive.server2.idle.session.timeout + 0ms + + Expects a time value with unit (d/day, h/hour, m/min, s/sec, ms/msec, us/usec, ns/nsec), which is msec if not specified. + Session will be closed when it's not accessed for this duration, which can be disabled by setting to zero or negative value. 
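Since hive.server2.custom.authentication.class (described above) expects an implementation of org.apache.hive.service.auth.PasswdAuthenticationProvider, a minimal sketch follows; the hard-coded credentials are purely illustrative, and a real provider would consult an external store:

import javax.security.sasl.AuthenticationException;

import org.apache.hive.service.auth.PasswdAuthenticationProvider;

// Minimal custom authenticator for hive.server2.authentication=CUSTOM.
public class StaticPasswdAuthProvider implements PasswdAuthenticationProvider {
  @Override
  public void Authenticate(String user, String password) throws AuthenticationException {
    // Illustrative check only; replace with LDAP/database/etc. lookups.
    if (!"hive".equals(user) || !"secret".equals(password)) {
      throw new AuthenticationException("Invalid credentials for user " + user);
    }
  }
}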
+ + + + hive.server2.idle.operation.timeout + 0ms + + Expects a time value with unit (d/day, h/hour, m/min, s/sec, ms/msec, us/usec, ns/nsec), which is msec if not specified. + Operation will be closed when it's not accessed for this duration of time, which can be disabled by setting to zero value. + With positive value, it's checked for operations in terminal state only (FINISHED, CANCELED, CLOSED, ERROR). + With negative value, it's checked for all of the operations regardless of state. + + + + hive.conf.restricted.list + hive.security.authenticator.manager,hive.security.authorization.manager,hive.users.in.admin.role + Comma separated list of configuration options which are immutable at runtime + + + hive.multi.insert.move.tasks.share.dependencies + false + + If this is set all move tasks for tables/partitions (not directories) at the end of a + multi-insert query will only begin once the dependencies for all these move tasks have been + met. + Advantages: If concurrency is enabled, the locks will only be released once the query has + finished, so with this config enabled, the time when the table/partition is + generated will be much closer to when the lock on it is released. + Disadvantages: If concurrency is not enabled, with this disabled, the tables/partitions which + are produced by this query and finish earlier will be available for querying + much earlier. Since the locks are only released once the query finishes, this + does not apply if concurrency is enabled. + + + + hive.exec.infer.bucket.sort + false + + If this is set, when writing partitions, the metadata will include the bucketing/sorting + properties with which the data was written if any (this will not overwrite the metadata + inherited from the table if the table is bucketed/sorted) + + + + hive.exec.infer.bucket.sort.num.buckets.power.two + false + + If this is set, when setting the number of reducers for the map reduce task which writes the + final output files, it will choose a number which is a power of two, unless the user specifies + the number of reducers to use using mapred.reduce.tasks. The number of reducers + may be set to a power of two, only to be followed by a merge task meaning preventing + anything from being inferred. + With hive.exec.infer.bucket.sort set to true: + Advantages: If this is not set, the number of buckets for partitions will seem arbitrary, + which means that the number of mappers used for optimized joins, for example, will + be very low. With this set, since the number of buckets used for any partition is + a power of two, the number of mappers used for optimized joins will be the least + number of buckets used by any partition being joined. + Disadvantages: This may mean a much larger or much smaller number of reducers being used in the + final map reduce job, e.g. if a job was originally going to take 257 reducers, + it will now take 512 reducers, similarly if the max number of reducers is 511, + and a job was going to use this many, it will now use 256 reducers. + + + + hive.merge.current.job.concatenate.list.bucketing + true + + + + hive.merge.current.job.concatenate.list.bucketing.depth + 0 + + + + hive.optimize.listbucketing + false + Enable list bucketing optimizer. Default value is false so that we disable it by default. + + + hive.server.read.socket.timeout + 10s + + Expects a time value with unit (d/day, h/hour, m/min, s/sec, ms/msec, us/usec, ns/nsec), which is sec if not specified. + Timeout for the HiveServer to close the connection if no response from the client. 
By default, 10 seconds. + + + + hive.server.tcp.keepalive + true + Whether to enable TCP keepalive for the Hive Server. Keepalive will prevent accumulation of half-open connections. + + + hive.decode.partition.name + false + Whether to show the unquoted partition names in query results. + + + hive.execution.engine + mr + + Expects one of [mr, tez]. + Chooses execution engine. Options are: mr (Map reduce, default) or tez (hadoop 2 only) + + + + hive.jar.directory + + + This is the location hive in tez mode will look for to find a site wide + installed hive instance. + + + + hive.user.install.directory + hdfs:///user/ + + If hive (in tez mode only) cannot find a usable hive jar in "hive.jar.directory", + it will upload the hive jar to "hive.user.install.directory/user.name" + and use it to run queries. + + + + hive.vectorized.execution.enabled + false + + This flag should be set to true to enable vectorized mode of query execution. + The default value is false. + + + + hive.vectorized.groupby.checkinterval + 100000 + Number of entries added to the group by aggregation hash before a recomputation of average entry size is performed. + + + hive.vectorized.groupby.maxentries + 1000000 + + Max number of entries in the vector group by aggregation hashtables. + Exceeding this will trigger a flush irrelevant of memory pressure condition. + + + + hive.vectorized.groupby.flush.percent + 0.1 + Percent of entries in the group by aggregation hash flushed when the memory threshold is exceeded. + + + hive.typecheck.on.insert + true + + + + hive.rpc.query.plan + false + Whether to send the query plan via local resource or RPC + + + hive.compute.splits.in.am + true + Whether to generate the splits locally or in the AM (tez only) + + + hive.prewarm.enabled + false + Enables container prewarm for Tez (Hadoop 2 only) + + + hive.prewarm.numcontainers + 10 + Controls the number of containers to prewarm for Tez (Hadoop 2 only) + + + hive.stageid.rearrange + none + + Expects one of [none, idonly, traverse, execution]. + + + + hive.explain.dependency.append.tasktype + false + + + + hive.counters.group.name + HIVE + The name of counter group for internal Hive variables (CREATED_FILE, FATAL_ERROR, etc.) + + + hive.server2.tez.default.queues + + + A list of comma separated values corresponding to YARN queues of the same name. + When HiveServer2 is launched in Tez mode, this configuration needs to be set + for multiple Tez sessions to run in parallel on the cluster. + + + + hive.server2.tez.sessions.per.default.queue + 1 + + A positive integer that determines the number of Tez sessions that should be + launched on each of the queues specified by "hive.server2.tez.default.queues". + Determines the parallelism on each queue. + + + + hive.server2.tez.initialize.default.sessions + false + + This flag is used in HiveServer2 to enable a user to use HiveServer2 without + turning on Tez for HiveServer2. The user could potentially want to run queries + over Tez without the pool of sessions. + + + + hive.support.quoted.identifiers + column + + Expects one of [none, column]. + Whether to use quoted identifier. 'none' or 'column' can be used. + none: default(past) behavior. Implies only alphaNumeric and underscore are valid characters in identifiers. + column: implies column names can contain any character. + + + + hive.users.in.admin.role + + + Comma separated list of users who are in admin role for bootstrapping. + More users can be added in ADMIN role later. 
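To illustrate the vectorized group-by memory controls above (hive.vectorized.groupby.maxentries and hive.vectorized.groupby.flush.percent), here is a small standalone analogue of the flush decision; it is a sketch of the described behavior, not Hive's actual implementation:

// Analogue of the vectorized group-by flush policy described above.
public class GroupByFlushSketch {
  static final int MAX_ENTRIES = 1000000;   // hive.vectorized.groupby.maxentries
  static final double FLUSH_PERCENT = 0.1;  // hive.vectorized.groupby.flush.percent

  // Returns how many hashtable entries to flush once the entry cap is exceeded.
  static int entriesToFlush(int currentEntries) {
    if (currentEntries < MAX_ENTRIES) {
      return 0;                                    // under the cap: keep aggregating
    }
    return (int) (currentEntries * FLUSH_PERCENT); // flush 10%, keep the rest
  }

  public static void main(String[] args) {
    System.out.println(entriesToFlush(1200000));   // prints 120000
  }
}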
+ + + + hive.compat + 0.12 + + Enable (configurable) deprecated behaviors by setting desired level of backward compatibility. + Setting to 0.12: + Maintains division behavior: int / int = double + + + + hive.convert.join.bucket.mapjoin.tez + false + + Whether joins can be automatically converted to bucket map joins in hive + when tez is used as the execution engine. + + + + hive.exec.check.crossproducts + true + Check if a plan contains a Cross Product. If there is one, output a warning to the Session's console. + + + hive.localize.resource.wait.interval + 5000ms + + Expects a time value with unit (d/day, h/hour, m/min, s/sec, ms/msec, us/usec, ns/nsec), which is msec if not specified. + Time to wait for another thread to localize the same resource for hive-tez. + + + + hive.localize.resource.num.wait.attempts + 5 + The number of attempts waiting for localizing a resource in hive-tez. + + + hive.tez.auto.reducer.parallelism + false + + Turn on Tez' auto reducer parallelism feature. When enabled, Hive will still estimate data sizes + and set parallelism estimates. Tez will sample source vertices' output sizes and adjust the estimates at runtime as + necessary. + + + + hive.tez.max.partition.factor + 2.0 + When auto reducer parallelism is enabled this factor will be used to over-partition data in shuffle edges. + + + hive.tez.min.partition.factor + 0.25 + + When auto reducer parallelism is enabled this factor will be used to put a lower limit to the number + of reducers that tez specifies. + + + diff --git a/data/conf/hive-site.xml b/data/conf/hive-site.xml index ed3ee42..50027de 100644 --- a/data/conf/hive-site.xml +++ b/data/conf/hive-site.xml @@ -25,6 +25,12 @@ Internal marker for test. Used for masking env-dependent values + + hive.cbo.enable + true + Enable CBO for Unit Tests + + diff --git a/data/files/cbo_t1.txt b/data/files/cbo_t1.txt new file mode 100644 index 0000000..e8034a4 --- /dev/null +++ b/data/files/cbo_t1.txt @@ -0,0 +1,20 @@ +1,1,1,1,true +1,1,1,1,true +1,1,1,1,true +1,1,1,1,true +1,1,1,1,true +1,1,1,1,true +1,1,1,1,true +1,1,1,1,true +1,1,1,1,true +1,1,1,1,true + 1, 1,1,1,true + 1, 1,1,1,true + 1 , 1 ,1,1,true + 1 , 1 ,1,1,true +1 ,1 ,1,1,true +1 ,1 ,1,1,true +1,1,1,1,false +1,1,1,1,false +null,null,null,null,null +null,null,null,null,null diff --git a/data/files/cbo_t2.txt b/data/files/cbo_t2.txt new file mode 100644 index 0000000..34633d3 --- /dev/null +++ b/data/files/cbo_t2.txt @@ -0,0 +1,20 @@ +1,1,1,1,true +2,2,2,2,true +1,1,1,1,true +2,2,2,2,true +1,1,1,1,true +2,2,2,2,true +1,1,1,1,true +2,2,2,2,true +1,1,1,1,true +2,2,2,2,true + 1, 1,1,1,true + 1, 1,1,1,true + 1 , 1 ,1,1,true + 1 , 1 ,1,1,true +1 ,1 ,1,1,true +1 ,1 ,1,1,true +1,1,1,1,false +1,1,1,1,false +null,null,null,null,null +null,null,null,null,null diff --git a/data/files/cbo_t3.txt b/data/files/cbo_t3.txt new file mode 100644 index 0000000..a9c995e --- /dev/null +++ b/data/files/cbo_t3.txt @@ -0,0 +1,20 @@ +1,1,1,1,true +2,2,2,2,true +3,3,3,3,true +1,1,1,1,true +2,2,2,2,true +3,3,3,3,true +1,1,1,1,true +2,2,2,2,true +3,3,3,3,true +1,1,1,1,true + 1, 1,1,1,true + 1, 1,1,1,true + 1 , 1 ,1,1,true + 1 , 1 ,1,1,true +1 ,1 ,1,1,true +1 ,1 ,1,1,true +1,1,1,1,false +1,1,1,1,false +null,null,null,null,null +null,null,null,null,null diff --git a/data/files/cbo_t4.txt b/data/files/cbo_t4.txt new file mode 100644 index 0000000..56e0794 --- /dev/null +++ b/data/files/cbo_t4.txt @@ -0,0 +1,20 @@ +1,1,1,1,true +2,2,2,2,true +3,3,3,3,true +4,4,4,4,true +1,1,1,1,true +2,2,2,2,true +3,3,3,3,true +4,4,4,4,true +1,1,1,1,true 
+2,2,2,2,true + 1, 1,1,1,true + 1, 1,1,1,true + 1 , 1 ,1,1,true + 1 , 1 ,1,1,true +1 ,1 ,1,1,true +1 ,1 ,1,1,true +1,1,1,1,false +1,1,1,1,false +null,null,null,null,null +null,null,null,null,null diff --git a/data/files/cbo_t5.txt b/data/files/cbo_t5.txt new file mode 100644 index 0000000..245b1b9 --- /dev/null +++ b/data/files/cbo_t5.txt @@ -0,0 +1,20 @@ +1,1,1,1,true +2,2,2,2,true +3,3,3,3,true +4,4,4,4,true +5,5,5,5,true +1,1,1,1,true +2,2,2,2,true +3,3,3,3,true +4,4,4,4,true +5,5,5,5,true + 1, 1,1,1,true + 1, 1,1,1,true + 1 , 1 ,1,1,true + 1 , 1 ,1,1,true +1 ,1 ,1,1,true +1 ,1 ,1,1,true +1,1,1,1,false +1,1,1,1,false +null,null,null,null,null +null,null,null,null,null diff --git a/data/files/cbo_t6.txt b/data/files/cbo_t6.txt new file mode 100644 index 0000000..dd72edd --- /dev/null +++ b/data/files/cbo_t6.txt @@ -0,0 +1,20 @@ +1,1,1,1,true +2,2,2,2,true +3,3,3,3,true +4,4,4,4,true +5,5,5,5,true +6,6,6,6,true +1,1,1,1,true +2,2,2,2,true +3,3,3,3,true +4,4,4,4,true + 1, 1,1,1,true + 1, 1,1,1,true + 1 , 1 ,1,1,true + 1 , 1 ,1,1,true +1 ,1 ,1,1,true +1 ,1 ,1,1,true +1,1,1,1,false +1,1,1,1,false +null,null,null,null,null +null,null,null,null,null diff --git a/itests/qtest/testconfiguration.properties b/itests/qtest/testconfiguration.properties new file mode 100644 index 0000000..18e9a42 --- /dev/null +++ b/itests/qtest/testconfiguration.properties @@ -0,0 +1,5 @@ +minimr.query.files=stats_counter_partitioned.q,list_bucket_dml_10.q,input16_cc.q,scriptfile1.q,scriptfile1_win.q,bucket4.q,bucketmapjoin6.q,disable_merge_for_bucketing.q,reduce_deduplicate.q,smb_mapjoin_8.q,join1.q,groupby2.q,bucketizedhiveinputformat.q,bucketmapjoin7.q,optrstat_groupby.q,bucket_num_reducers.q,bucket5.q,load_fs2.q,bucket_num_reducers2.q,infer_bucket_sort_merge.q,infer_bucket_sort_reducers_power_two.q,infer_bucket_sort_dyn_part.q,infer_bucket_sort_bucketed_table.q,infer_bucket_sort_map_operators.q,infer_bucket_sort_num_buckets.q,leftsemijoin_mr.q,schemeAuthority.q,schemeAuthority2.q,truncate_column_buckets.q,remote_script.q,,load_hdfs_file_with_space_in_the_name.q,parallel_orderby.q,import_exported_table.q,stats_counter.q,auto_sortmerge_join_16.q,quotedid_smb.q,file_with_header_footer.q,external_table_with_space_in_location_path.q,root_dir_external_table.q,index_bitmap3.q,ql_rewrite_gbtoidx.q,index_bitmap_auto.q,udf_using.q,empty_dir_in_table.q,temp_table_external.q +minimr.query.negative.files=cluster_tasklog_retrieval.q,minimr_broken_pipe.q,mapreduce_stack_trace.q,mapreduce_stack_trace_turnoff.q,mapreduce_stack_trace_hadoop20.q,mapreduce_stack_trace_turnoff_hadoop20.q,file_with_header_footer_negative.q,udf_local_resource.q +minitez.query.files=tez_fsstat.q,mapjoin_decimal.q,tez_join_tests.q,tez_joins_explain.q,mrr.q,tez_dml.q,tez_insert_overwrite_local_directory_1.q,tez_union.q,bucket_map_join_tez1.q,bucket_map_join_tez2.q,tez_schema_evolution.q,tez_join_hash.q 
+minitez.query.files.shared=orc_merge1.q,orc_merge2.q,orc_merge3.q,orc_merge4.q,alter_merge_orc.q,alter_merge_2_orc.q,alter_merge_stats_orc.q,cross_product_check_1.q,cross_product_check_2.q,dynpart_sort_opt_vectorization.q,dynpart_sort_optimization.q,orc_analyze.q,join0.q,join1.q,auto_join0.q,auto_join1.q,bucket2.q,bucket3.q,bucket4.q,count.q,create_merge_compressed.q,cross_join.q,ctas.q,custom_input_output_format.q,disable_merge_for_bucketing.q,enforce_order.q,filter_join_breaktask.q,filter_join_breaktask2.q,groupby1.q,groupby2.q,groupby3.q,having.q,insert1.q,insert_into1.q,insert_into2.q,leftsemijoin.q,limit_pushdown.q,load_dyn_part1.q,load_dyn_part2.q,load_dyn_part3.q,mapjoin_mapjoin.q,mapreduce1.q,mapreduce2.q,merge1.q,merge2.q,metadata_only_queries.q,sample1.q,subquery_in.q,subquery_exists.q,vectorization_15.q,ptf.q,stats_counter.q,stats_noscan_1.q,stats_counter_partitioned.q,union2.q,union3.q,union4.q,union5.q,union6.q,union7.q,union8.q,union9.q,transform1.q,transform2.q,transform_ppr1.q,transform_ppr2.q,script_env_var1.q,script_env_var2.q,script_pipe.q,scriptfile1.q,metadataonly1.q,temp_table.q,vectorized_ptf.q,optimize_nullscan.q,vector_cast_constant.q,vector_string_concat.q,vector_decimal_aggregate.q,vector_left_outer_join.q,vectorization_12.q,vectorization_13.q,vectorization_14.q,vectorization_9.q,vectorization_part_project.q,vectorization_short_regress.q,vectorized_mapjoin.q,vectorized_nested_mapjoin.q,vectorized_shufflejoin.q,vectorized_timestamp_funcs.q,cbo_correctness.q +beeline.positive.exclude=add_part_exist.q,alter1.q,alter2.q,alter4.q,alter5.q,alter_rename_partition.q,alter_rename_partition_authorization.q,archive.q,archive_corrupt.q,archive_multi.q,archive_mr_1806.q,archive_multi_mr_1806.q,authorization_1.q,authorization_2.q,authorization_4.q,authorization_5.q,authorization_6.q,authorization_7.q,ba_table1.q,ba_table2.q,ba_table3.q,ba_table_udfs.q,binary_table_bincolserde.q,binary_table_colserde.q,cluster.q,columnarserde_create_shortcut.q,combine2.q,constant_prop.q,create_nested_type.q,create_or_replace_view.q,create_struct_table.q,create_union_table.q,database.q,database_location.q,database_properties.q,ddltime.q,describe_database_json.q,drop_database_removes_partition_dirs.q,escape1.q,escape2.q,exim_00_nonpart_empty.q,exim_01_nonpart.q,exim_02_00_part_empty.q,exim_02_part.q,exim_03_nonpart_over_compat.q,exim_04_all_part.q,exim_04_evolved_parts.q,exim_05_some_part.q,exim_06_one_part.q,exim_07_all_part_over_nonoverlap.q,exim_08_nonpart_rename.q,exim_09_part_spec_nonoverlap.q,exim_10_external_managed.q,exim_11_managed_external.q,exim_12_external_location.q,exim_13_managed_location.q,exim_14_managed_location_over_existing.q,exim_15_external_part.q,exim_16_part_external.q,exim_17_part_managed.q,exim_18_part_external.q,exim_19_00_part_external_location.q,exim_19_part_external_location.q,exim_20_part_managed_location.q,exim_21_export_authsuccess.q,exim_22_import_exist_authsuccess.q,exim_23_import_part_authsuccess.q,exim_24_import_nonexist_authsuccess.q,global_limit.q,groupby_complex_types.q,groupby_complex_types_multi_single_reducer.q,index_auth.q,index_auto.q,index_auto_empty.q,index_bitmap.q,index_bitmap1.q,index_bitmap2.q,index_bitmap3.q,index_bitmap_auto.q,index_bitmap_rc.q,index_compact.q,index_compact_1.q,index_compact_2.q,index_compact_3.q,index_stale_partitioned.q,init_file.q,input16.q,input16_cc.q,input46.q,input_columnarserde.q,input_dynamicserde.q,input_lazyserde.q,input_testxpath3.q,input_testxpath4.q,insert2_overwrite_partitions.q,insertexternal1.q,join_thrift.q,la
teral_view.q,load_binary_data.q,load_exist_part_authsuccess.q,load_nonpart_authsuccess.q,load_part_authsuccess.q,loadpart_err.q,lock1.q,lock2.q,lock3.q,lock4.q,merge_dynamic_partition.q,multi_insert.q,multi_insert_move_tasks_share_dependencies.q,null_column.q,ppd_clusterby.q,query_with_semi.q,rename_column.q,sample6.q,sample_islocalmode_hook.q,set_processor_namespaces.q,show_tables.q,source.q,split_sample.q,str_to_map.q,transform1.q,udaf_collect_set.q,udaf_context_ngrams.q,udaf_histogram_numeric.q,udaf_ngrams.q,udaf_percentile_approx.q,udf_array.q,udf_bitmap_and.q,udf_bitmap_or.q,udf_explode.q,udf_format_number.q,udf_map.q,udf_map_keys.q,udf_map_values.q,udf_max.q,udf_min.q,udf_named_struct.q,udf_percentile.q,udf_printf.q,udf_sentences.q,udf_sort_array.q,udf_split.q,udf_struct.q,udf_substr.q,udf_translate.q,udf_union.q,udf_xpath.q,udtf_stack.q,view.q,virtual_column.q diff --git a/itests/src/test/resources/testconfiguration.properties b/itests/src/test/resources/testconfiguration.properties index 4ad72a3..3571ae0 100644 --- a/itests/src/test/resources/testconfiguration.properties +++ b/itests/src/test/resources/testconfiguration.properties @@ -55,6 +55,7 @@ minitez.query.files.shared=alter_merge_2_orc.q,\ bucket2.q,\ bucket3.q,\ bucket4.q,\ + cbo_correctness.q,\ count.q,\ create_merge_compressed.q,\ cross_join.q,\ diff --git a/ql/pom.xml b/ql/pom.xml index 0729d47..7b113fb 100644 --- a/ql/pom.xml +++ b/ql/pom.xml @@ -28,6 +28,7 @@ Hive Query Language + 0.9.1-incubating-SNAPSHOT .. @@ -182,6 +183,42 @@ ${datanucleus-core.version} + org.apache.optiq + optiq-core + ${optiq.version} + + + + org.hsqldb + hsqldb + + + com.fasterxml.jackson.core + jackson-databind + + + + + org.apache.optiq + optiq-avatica + ${optiq.version} + + + + org.hsqldb + hsqldb + + + com.fasterxml.jackson.core + jackson-databind + + + + com.google.guava guava ${guava.version} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/QueryProperties.java b/ql/src/java/org/apache/hadoop/hive/ql/QueryProperties.java index 1ba5654..4e8f6af 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/QueryProperties.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/QueryProperties.java @@ -48,12 +48,28 @@ boolean mapJoinRemoved = false; boolean hasMapGroupBy = false; + private int noOfJoins = 0; + private int noOfOuterJoins = 0; + + private boolean multiDestQuery; + private boolean filterWithSubQuery; + public boolean hasJoin() { - return hasJoin; + return (noOfJoins > 0); } - public void setHasJoin(boolean hasJoin) { - this.hasJoin = hasJoin; + public void incrementJoinCount(boolean outerJoin) { + noOfJoins++; + if (outerJoin) + noOfOuterJoins++; + } + + public int getJoinCount() { + return noOfJoins; + } + + public int getOuterJoinCount() { + return noOfOuterJoins; } public boolean hasGroupBy() { @@ -143,4 +159,45 @@ public boolean isHasMapGroupBy() { public void setHasMapGroupBy(boolean hasMapGroupBy) { this.hasMapGroupBy = hasMapGroupBy; } + + public boolean hasMultiDestQuery() { + return this.multiDestQuery; + } + + public void setMultiDestQuery(boolean multiDestQuery) { + this.multiDestQuery = multiDestQuery; + } + + public void setFilterWithSubQuery(boolean filterWithSubQuery) { + this.filterWithSubQuery = filterWithSubQuery; + } + + public boolean hasFilterWithSubQuery() { + return this.filterWithSubQuery; + } + + + public void clear() { + hasJoin = false; + hasGroupBy = false; + hasOrderBy = false; + hasSortBy = false; + hasJoinFollowedByGroupBy = false; + hasPTF = false; + hasWindowing = false; + + // does the query have a using clause 
+ usesScript = false; + + hasDistributeBy = false; + hasClusterBy = false; + mapJoinRemoved = false; + hasMapGroupBy = false; + + noOfJoins = 0; + noOfOuterJoins = 0; + + multiDestQuery = false; + filterWithSubQuery = false; + } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/PreCBOOptimizer.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/PreCBOOptimizer.java new file mode 100644 index 0000000..e5b64a6 --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/PreCBOOptimizer.java @@ -0,0 +1,85 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.ql.optimizer; + +import java.util.ArrayList; +import java.util.List; + +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.ql.optimizer.lineage.Generator; +import org.apache.hadoop.hive.ql.optimizer.pcr.PartitionConditionRemover; +import org.apache.hadoop.hive.ql.optimizer.ppr.PartitionPruner; +import org.apache.hadoop.hive.ql.optimizer.stats.annotation.AnnotateWithStatistics; +import org.apache.hadoop.hive.ql.parse.ParseContext; +import org.apache.hadoop.hive.ql.parse.SemanticException; +import org.apache.hadoop.hive.ql.ppd.PredicatePushDown; +import org.apache.hadoop.hive.ql.ppd.PredicateTransitivePropagate; + +/* + * do PredicatePushdown, PartitionPruning and Column Pruning before CBO + */ +public class PreCBOOptimizer { + private ParseContext pctx; + private List transformations; + + /** + * Create the list of transformations. + * + * @param hiveConf + */ + public void initialize(HiveConf hiveConf) { + transformations = new ArrayList(); + // Add the transformation that computes the lineage information. + transformations.add(new Generator()); + transformations.add(new PredicateTransitivePropagate()); + transformations.add(new PredicatePushDown()); + transformations.add(new PartitionPruner()); + transformations.add(new PartitionConditionRemover()); + transformations.add(new ColumnPruner()); + transformations.add(new AnnotateWithStatistics()); + } + + /** + * Invoke all the transformations one-by-one, and alter the query plan. 
+ * + * @return ParseContext + * @throws SemanticException + */ + public ParseContext optimize() throws SemanticException { + for (Transform t : transformations) { + pctx = t.transform(pctx); + } + return pctx; + } + + /** + * @return the pctx + */ + public ParseContext getPctx() { + return pctx; + } + + /** + * @param pctx + * the pctx to set + */ + public void setPctx(ParseContext pctx) { + this.pctx = pctx; + } + +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/HiveDefaultRelMetadataProvider.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/HiveDefaultRelMetadataProvider.java new file mode 100644 index 0000000..2c08772 --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/HiveDefaultRelMetadataProvider.java @@ -0,0 +1,27 @@ +package org.apache.hadoop.hive.ql.optimizer.optiq; + +import com.google.common.collect.ImmutableList; + +import org.apache.hadoop.hive.ql.optimizer.optiq.stats.HiveRelMdDistinctRowCount; +import org.apache.hadoop.hive.ql.optimizer.optiq.stats.HiveRelMdSelectivity; +import org.eigenbase.rel.metadata.ChainedRelMetadataProvider; +import org.eigenbase.rel.metadata.DefaultRelMetadataProvider; +import org.eigenbase.rel.metadata.RelMetadataProvider; + +/** + * Distinct row count and Selectivity is overridden for Hive.
+ *
+ *
+ * Distinct Row Count is overridden for:
+ * 1) Join 2) TableScan.
+ * Selectivity is overridden for:
+ * 1) Join 2) TableScan & Filter. + */ +public class HiveDefaultRelMetadataProvider { + private HiveDefaultRelMetadataProvider() { + } + + public static final RelMetadataProvider INSTANCE = ChainedRelMetadataProvider.of(ImmutableList + .of(HiveRelMdDistinctRowCount.SOURCE, + HiveRelMdSelectivity.SOURCE, + new DefaultRelMetadataProvider())); +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/HiveOptiqUtil.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/HiveOptiqUtil.java new file mode 100644 index 0000000..9faae39 --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/HiveOptiqUtil.java @@ -0,0 +1,200 @@ +package org.apache.hadoop.hive.ql.optimizer.optiq; + +import java.util.ArrayList; +import java.util.BitSet; +import java.util.List; + +import org.eigenbase.rel.RelFactories.ProjectFactory; +import org.eigenbase.rel.RelNode; +import org.eigenbase.reltype.RelDataTypeField; +import org.eigenbase.rex.RexBuilder; +import org.eigenbase.rex.RexInputRef; +import org.eigenbase.rex.RexNode; +import org.eigenbase.sql.fun.SqlStdOperatorTable; +import org.eigenbase.sql.validate.SqlValidatorUtil; +import org.eigenbase.util.Pair; + +import com.google.common.base.Function; +import com.google.common.collect.Lists; + +/** + * Generic utility functions needed for Optiq based Hive CBO. + */ + +public class HiveOptiqUtil { + + /** + * Get list of virtual columns from the given list of projections. + *

+ * + * @param exps + * list of rex nodes representing projections + * @return List of Virtual Columns, will not be null. + */ + public static List getVirtualCols(List exps) { + List vCols = new ArrayList(); + + for (int i = 0; i < exps.size(); i++) { + if (!(exps.get(i) instanceof RexInputRef)) { + vCols.add(i); + } + } + + return vCols; + } + + public static List getProjsFromBelowAsInputRef(final RelNode rel) { + List projectList = Lists.transform(rel.getRowType().getFieldList(), + new Function() { + @Override + public RexNode apply(RelDataTypeField field) { + return rel.getCluster().getRexBuilder().makeInputRef(field.getType(), field.getIndex()); + } + }); + return projectList; + } + + public static List translateBitSetToProjIndx(BitSet projBitSet) { + List projIndxLst = new ArrayList(); + + for (int i = 0; i < projBitSet.length(); i++) { + if (projBitSet.get(i)) { + projIndxLst.add(i); + } + } + + return projIndxLst; + } + + @Deprecated + public static void todo(String s) { + } + + /** + * Push any equi join conditions that are not column references as Projections + * on top of the children. + * + * @param factory + * Project factory to use. + * @param inputRels + * inputs to a join + * @param leftJoinKeys + * expressions for LHS of join key + * @param rightJoinKeys + * expressions for RHS of join key + * @param systemColCount + * number of system columns, usually zero. These columns are + * projected at the leading edge of the output row. + * @param leftKeys on return this contains the join key positions from + * the new project rel on the LHS. + * @param rightKeys on return this contains the join key positions from + * the new project rel on the RHS. + * @return the join condition after the equi expressions pushed down. + */ + public static RexNode projectNonColumnEquiConditions(ProjectFactory factory, + RelNode[] inputRels, List leftJoinKeys, + List rightJoinKeys, int systemColCount, List leftKeys, + List rightKeys) { + RelNode leftRel = inputRels[0]; + RelNode rightRel = inputRels[1]; + RexBuilder rexBuilder = leftRel.getCluster().getRexBuilder(); + RexNode outJoinCond = null; + + int origLeftInputSize = leftRel.getRowType().getFieldCount(); + int origRightInputSize = rightRel.getRowType().getFieldCount(); + + List newLeftFields = new ArrayList(); + List newLeftFieldNames = new ArrayList(); + + List newRightFields = new ArrayList(); + List newRightFieldNames = new ArrayList(); + int leftKeyCount = leftJoinKeys.size(); + int i; + + for (i = 0; i < origLeftInputSize; i++) { + final RelDataTypeField field = leftRel.getRowType().getFieldList().get(i); + newLeftFields.add(rexBuilder.makeInputRef(field.getType(), i)); + newLeftFieldNames.add(field.getName()); + } + + for (i = 0; i < origRightInputSize; i++) { + final RelDataTypeField field = rightRel.getRowType().getFieldList() + .get(i); + newRightFields.add(rexBuilder.makeInputRef(field.getType(), i)); + newRightFieldNames.add(field.getName()); + } + + int newKeyCount = 0; + List> origColEqConds = new ArrayList>(); + for (i = 0; i < leftKeyCount; i++) { + RexNode leftKey = leftJoinKeys.get(i); + RexNode rightKey = rightJoinKeys.get(i); + + if (leftKey instanceof RexInputRef && rightKey instanceof RexInputRef) { + origColEqConds.add(Pair.of(((RexInputRef) leftKey).getIndex(), + ((RexInputRef) rightKey).getIndex())); + } else { + newLeftFields.add(leftKey); + newLeftFieldNames.add(null); + newRightFields.add(rightKey); + newRightFieldNames.add(null); + newKeyCount++; + } + } + + for (i = 0; i < origColEqConds.size(); i++) { + Pair p = 
origColEqConds.get(i); + RexNode leftKey = leftJoinKeys.get(i); + RexNode rightKey = rightJoinKeys.get(i); + leftKeys.add(p.left); + rightKeys.add(p.right); + RexNode cond = rexBuilder.makeCall( + SqlStdOperatorTable.EQUALS, + rexBuilder.makeInputRef(leftKey.getType(), systemColCount + p.left), + rexBuilder.makeInputRef(rightKey.getType(), systemColCount + + origLeftInputSize + newKeyCount + p.right)); + if (outJoinCond == null) { + outJoinCond = cond; + } else { + outJoinCond = rexBuilder.makeCall(SqlStdOperatorTable.AND, outJoinCond, + cond); + } + } + + if (newKeyCount == 0) { + return outJoinCond; + } + + int newLeftOffset = systemColCount + origLeftInputSize; + int newRightOffset = systemColCount + origLeftInputSize + + origRightInputSize + newKeyCount; + for (i = 0; i < newKeyCount; i++) { + leftKeys.add(origLeftInputSize + i); + rightKeys.add(origRightInputSize + i); + RexNode cond = rexBuilder.makeCall(SqlStdOperatorTable.EQUALS, rexBuilder + .makeInputRef(newLeftFields.get(i).getType(), newLeftOffset + i), + rexBuilder.makeInputRef(newLeftFields.get(i).getType(), + newRightOffset + i)); + if (outJoinCond == null) { + outJoinCond = cond; + } else { + outJoinCond = rexBuilder.makeCall(SqlStdOperatorTable.AND, outJoinCond, + cond); + } + } + + // added project if need to produce new keys than the original input + // fields + if (newKeyCount > 0) { + leftRel = factory.createProject(leftRel, newLeftFields, + SqlValidatorUtil.uniquify(newLeftFieldNames)); + rightRel = factory.createProject(rightRel, newRightFields, + SqlValidatorUtil.uniquify(newRightFieldNames)); + } + + inputRels[0] = leftRel; + inputRels[1] = rightRel; + + return outJoinCond; + } +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/JoinUtil.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/JoinUtil.java new file mode 100644 index 0000000..da77d36 --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/JoinUtil.java @@ -0,0 +1,295 @@ +package org.apache.hadoop.hive.ql.optimizer.optiq; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Set; + +import org.apache.hadoop.hive.ql.optimizer.optiq.reloperators.HiveJoinRel; +import org.eigenbase.relopt.RelOptUtil; +import org.eigenbase.relopt.RelOptUtil.InputReferencedVisitor; +import org.eigenbase.rex.RexNode; +import org.eigenbase.sql.SqlKind; + +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.ImmutableSet; + +/** + * Utility for inspecting Join Conditions.
+ *

+ * Main Elements:
+ * 1. JoinPredicateInfo - represents Join Condition.
+ * 2. JoinLeafPredicateInfo - represents leaf predicates with in join condition. + * + * TODO: Move this to Optiq Framework + */ +public class JoinUtil { + + /** + * JoinPredicateInfo represents Join condition; JoinPredicate Info uses + * JoinLeafPredicateInfo to represent individual conjunctive elements in the + * predicate.
+ * JoinPredicateInfo = JoinLeafPredicateInfo1 and JoinLeafPredicateInfo2...
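+ * e.g. (illustrative, hypothetical tables t1, t2): a condition
+ * (t1.c0 = t2.c2) AND (t1.c1 > t2.c3) decomposes into one equi
+ * JoinLeafPredicateInfo for the EQUALS conjunct and one non-equi
+ * JoinLeafPredicateInfo for the greater-than conjunct.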
+ *
+ * JoinPredicateInfo:
+ * 1. preserves the order of conjunctive elements for
+ * equi-join(m_equiJoinPredicateElements)
+ * 2. Stores the set of projection indexes from the left and right child which
+ * are part of the equi join keys; the indexes are kept in both the child and
+ * the Join node schema.
+ * 3. Keeps a map of projection indexes that are part of join keys to list of + * conjuctive elements(JoinLeafPredicateInfo) that uses them. + * + */ + public static class JoinPredicateInfo { + private final ImmutableList m_nonEquiJoinPredicateElements; + private final ImmutableList m_equiJoinPredicateElements; + private final ImmutableSet m_projsFromLeftPartOfJoinKeysInChildSchema; + private final ImmutableSet m_projsFromRightPartOfJoinKeysInChildSchema; + private final ImmutableSet m_projsFromRightPartOfJoinKeysInJoinSchema; + private final ImmutableMap> m_mapOfProjIndxInJoinSchemaToLeafPInfo; + + public JoinPredicateInfo(List nonEquiJoinPredicateElements, + List equiJoinPredicateElements, + Set projsFromLeftPartOfJoinKeysInChildSchema, + Set projsFromRightPartOfJoinKeysInChildSchema, + Set projsFromRightPartOfJoinKeysInJoinSchema, + Map> mapOfProjIndxInJoinSchemaToLeafPInfo) { + m_nonEquiJoinPredicateElements = ImmutableList.copyOf(nonEquiJoinPredicateElements); + m_equiJoinPredicateElements = ImmutableList.copyOf(equiJoinPredicateElements); + m_projsFromLeftPartOfJoinKeysInChildSchema = ImmutableSet + .copyOf(projsFromLeftPartOfJoinKeysInChildSchema); + m_projsFromRightPartOfJoinKeysInChildSchema = ImmutableSet + .copyOf(projsFromRightPartOfJoinKeysInChildSchema); + m_projsFromRightPartOfJoinKeysInJoinSchema = ImmutableSet + .copyOf(projsFromRightPartOfJoinKeysInJoinSchema); + m_mapOfProjIndxInJoinSchemaToLeafPInfo = ImmutableMap + .copyOf(mapOfProjIndxInJoinSchemaToLeafPInfo); + } + + public List getNonEquiJoinPredicateElements() { + return m_nonEquiJoinPredicateElements; + } + + public List getEquiJoinPredicateElements() { + return m_equiJoinPredicateElements; + } + + public Set getProjsFromLeftPartOfJoinKeysInChildSchema() { + return m_projsFromLeftPartOfJoinKeysInChildSchema; + } + + public Set getProjsFromRightPartOfJoinKeysInChildSchema() { + return m_projsFromRightPartOfJoinKeysInChildSchema; + } + + /** + * NOTE: Join Schema = left Schema + (right Schema offset by + * left.fieldcount). Hence its ok to return projections from left in child + * schema. + */ + public Set getProjsFromLeftPartOfJoinKeysInJoinSchema() { + return m_projsFromLeftPartOfJoinKeysInChildSchema; + } + + public Set getProjsFromRightPartOfJoinKeysInJoinSchema() { + return m_projsFromRightPartOfJoinKeysInJoinSchema; + } + + public Map> getMapOfProjIndxToLeafPInfo() { + return m_mapOfProjIndxInJoinSchemaToLeafPInfo; + } + + public static JoinPredicateInfo constructJoinPredicateInfo(HiveJoinRel j) { + return constructJoinPredicateInfo(j, j.getCondition()); + } + + public static JoinPredicateInfo constructJoinPredicateInfo(HiveJoinRel j, RexNode predicate) { + JoinPredicateInfo jpi = null; + JoinLeafPredicateInfo jlpi = null; + List equiLPIList = new ArrayList(); + List nonEquiLPIList = new ArrayList(); + Set projsFromLeftPartOfJoinKeys = new HashSet(); + Set projsFromRightPartOfJoinKeys = new HashSet(); + Set projsFromRightPartOfJoinKeysInJoinSchema = new HashSet(); + Map> tmpMapOfProjIndxInJoinSchemaToLeafPInfo = new HashMap>(); + Map> mapOfProjIndxInJoinSchemaToLeafPInfo = new HashMap>(); + List tmpJLPILst = null; + int rightOffSet = j.getLeft().getRowType().getFieldCount(); + int projIndxInJoin; + List conjuctiveElements; + + todo("Move this to Optiq"); + + // 1. Decompose Join condition to a number of leaf predicates + // (conjuctive elements) + conjuctiveElements = RelOptUtil.conjunctions(predicate); + + // 2. 
Walk through leaf predicates building up JoinLeafPredicateInfo + for (RexNode ce : conjuctiveElements) { + // 2.1 Construct JoinLeafPredicateInfo + jlpi = JoinLeafPredicateInfo.constructJoinLeafPredicateInfo(j, ce); + + // 2.2 Classify leaf predicate as Equi vs Non Equi + if (jlpi.m_comparisonType.equals(SqlKind.EQUALS)) { + equiLPIList.add(jlpi); + } else { + nonEquiLPIList.add(jlpi); + } + + // 2.3 Maintain join keys coming from left vs right (in child & + // Join Schema) + projsFromLeftPartOfJoinKeys.addAll(jlpi.getProjsFromLeftPartOfJoinKeysInChildSchema()); + projsFromRightPartOfJoinKeys.addAll(jlpi.getProjsFromRightPartOfJoinKeysInChildSchema()); + projsFromRightPartOfJoinKeysInJoinSchema.addAll(jlpi + .getProjsFromRightPartOfJoinKeysInJoinSchema()); + + // 2.4 Update Join Key to JoinLeafPredicateInfo map with keys + // from left + for (Integer projIndx : jlpi.getProjsFromLeftPartOfJoinKeysInChildSchema()) { + tmpJLPILst = tmpMapOfProjIndxInJoinSchemaToLeafPInfo.get(projIndx); + if (tmpJLPILst == null) + tmpJLPILst = new ArrayList(); + tmpJLPILst.add(jlpi); + tmpMapOfProjIndxInJoinSchemaToLeafPInfo.put(projIndx, tmpJLPILst); + } + + // 2.5 Update Join Key to JoinLeafPredicateInfo map with keys + // from right + for (Integer projIndx : jlpi.getProjsFromRightPartOfJoinKeysInChildSchema()) { + projIndxInJoin = projIndx + rightOffSet; + tmpJLPILst = tmpMapOfProjIndxInJoinSchemaToLeafPInfo.get(projIndxInJoin); + if (tmpJLPILst == null) + tmpJLPILst = new ArrayList(); + tmpJLPILst.add(jlpi); + tmpMapOfProjIndxInJoinSchemaToLeafPInfo.put(projIndxInJoin, tmpJLPILst); + } + + } + + // 3. Update Update Join Key to List to use + // ImmutableList + for (Entry> e : tmpMapOfProjIndxInJoinSchemaToLeafPInfo + .entrySet()) { + mapOfProjIndxInJoinSchemaToLeafPInfo.put(e.getKey(), ImmutableList.copyOf(e.getValue())); + } + + // 4. Construct JoinPredicateInfo + jpi = new JoinPredicateInfo(nonEquiLPIList, equiLPIList, projsFromLeftPartOfJoinKeys, + projsFromRightPartOfJoinKeys, projsFromRightPartOfJoinKeysInJoinSchema, + mapOfProjIndxInJoinSchemaToLeafPInfo); + return jpi; + } + } + + /** + * JoinLeafPredicateInfo represents leaf predicate in Join condition + * (conjuctive lement).
+ *
+ * JoinLeafPredicateInfo:
+ * 1. Stores the list of expressions from the left and right child which are
+ * part of the equi join keys.
+ * 2. Stores the set of projection indexes from the left and right child which
+ * are part of the equi join keys; the indexes are kept in both the child and
+ * the Join node schema.
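+ * <p>
+ * Illustrative example (hypothetical tables t1, t2): for a leaf predicate
+ * t1.c0 = t2.c2 where the left child t1 has 5 fields, the left indexes are
+ * {0} (identical in the child and Join schema) and the right indexes are {2}
+ * in the child schema and {7} in the Join schema (child index + left field
+ * count).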
+ */ + public static class JoinLeafPredicateInfo { + private final SqlKind m_comparisonType; + private final ImmutableList m_joinKeyExprsFromLeft; + private final ImmutableList m_joinKeyExprsFromRight; + private final ImmutableSet m_projsFromLeftPartOfJoinKeysInChildSchema; + private final ImmutableSet m_projsFromRightPartOfJoinKeysInChildSchema; + private final ImmutableSet m_projsFromRightPartOfJoinKeysInJoinSchema; + + public JoinLeafPredicateInfo(SqlKind comparisonType, List joinKeyExprsFromLeft, + List joinKeyExprsFromRight, Set projsFromLeftPartOfJoinKeysInChildSchema, + Set projsFromRightPartOfJoinKeysInChildSchema, + Set projsFromRightPartOfJoinKeysInJoinSchema) { + m_comparisonType = comparisonType; + m_joinKeyExprsFromLeft = ImmutableList.copyOf(joinKeyExprsFromLeft); + m_joinKeyExprsFromRight = ImmutableList.copyOf(joinKeyExprsFromRight); + m_projsFromLeftPartOfJoinKeysInChildSchema = ImmutableSet + .copyOf(projsFromLeftPartOfJoinKeysInChildSchema); + m_projsFromRightPartOfJoinKeysInChildSchema = ImmutableSet + .copyOf(projsFromRightPartOfJoinKeysInChildSchema); + m_projsFromRightPartOfJoinKeysInJoinSchema = ImmutableSet + .copyOf(projsFromRightPartOfJoinKeysInJoinSchema); + } + + public List getJoinKeyExprsFromLeft() { + return m_joinKeyExprsFromLeft; + } + + public List getJoinKeyExprsFromRight() { + return m_joinKeyExprsFromRight; + } + + public Set getProjsFromLeftPartOfJoinKeysInChildSchema() { + return m_projsFromLeftPartOfJoinKeysInChildSchema; + } + + /** + * NOTE: Join Schema = left Schema + (right Schema offset by + * left.fieldcount). Hence its ok to return projections from left in child + * schema. + */ + public Set getProjsFromLeftPartOfJoinKeysInJoinSchema() { + return m_projsFromLeftPartOfJoinKeysInChildSchema; + } + + public Set getProjsFromRightPartOfJoinKeysInChildSchema() { + return m_projsFromRightPartOfJoinKeysInChildSchema; + } + + public Set getProjsFromRightPartOfJoinKeysInJoinSchema() { + return m_projsFromRightPartOfJoinKeysInJoinSchema; + } + + public static JoinLeafPredicateInfo constructJoinLeafPredicateInfo(HiveJoinRel j, RexNode pe) { + JoinLeafPredicateInfo jlpi = null; + List filterNulls = new ArrayList(); + List joinKeyExprsFromLeft = new ArrayList(); + List joinKeyExprsFromRight = new ArrayList(); + Set projsFromLeftPartOfJoinKeysInChildSchema = new HashSet(); + Set projsFromRightPartOfJoinKeysInChildSchema = new HashSet(); + Set projsFromRightPartOfJoinKeysInJoinSchema = new HashSet(); + int rightOffSet = j.getLeft().getRowType().getFieldCount(); + + todo("Move this to Optiq"); + + // 1. Split leaf join predicate to expressions from left, right + @SuppressWarnings("unused") + RexNode nonEquiPredicate = RelOptUtil.splitJoinCondition(j.getSystemFieldList(), j.getLeft(), + j.getRight(), pe, joinKeyExprsFromLeft, joinKeyExprsFromRight, filterNulls, null); + + // 2. For left expressions, collect child projection indexes used + InputReferencedVisitor irvLeft = new InputReferencedVisitor(); + irvLeft.apply(joinKeyExprsFromLeft); + projsFromLeftPartOfJoinKeysInChildSchema.addAll(irvLeft.inputPosReferenced); + + // 3. For right expressions, collect child projection indexes used + InputReferencedVisitor irvRight = new InputReferencedVisitor(); + irvRight.apply(joinKeyExprsFromRight); + projsFromRightPartOfJoinKeysInChildSchema.addAll(irvRight.inputPosReferenced); + + // 3. Translate projection indexes from right to join schema, by adding + // offset. 
+ for (Integer indx : projsFromRightPartOfJoinKeysInChildSchema) { + projsFromRightPartOfJoinKeysInJoinSchema.add(indx + rightOffSet); + } + + // 4. Construct JoinLeafPredicateInfo + jlpi = new JoinLeafPredicateInfo(pe.getKind(), joinKeyExprsFromLeft, joinKeyExprsFromRight, + projsFromLeftPartOfJoinKeysInChildSchema, projsFromRightPartOfJoinKeysInChildSchema, + projsFromRightPartOfJoinKeysInJoinSchema); + + return jlpi; + } + } + + @Deprecated + public static void todo(String s) { + } +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/Pair.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/Pair.java new file mode 100644 index 0000000..c923340 --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/Pair.java @@ -0,0 +1,19 @@ +package org.apache.hadoop.hive.ql.optimizer.optiq; + +public class Pair { + private final T1 m_first; + private final T2 m_second; + + public Pair(T1 first, T2 second) { + m_first = first; + m_second = second; + } + + public T1 getFirst() { + return m_first; + } + + public T2 getSecond() { + return m_second; + } +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/RelOptHiveTable.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/RelOptHiveTable.java new file mode 100644 index 0000000..d3e517d --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/RelOptHiveTable.java @@ -0,0 +1,321 @@ +package org.apache.hadoop.hive.ql.optimizer.optiq; + +import java.util.ArrayList; +import java.util.BitSet; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hive.common.StatsSetupConst; +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.ql.exec.ColumnInfo; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.metadata.Partition; +import org.apache.hadoop.hive.ql.metadata.Table; +import org.apache.hadoop.hive.ql.optimizer.optiq.translator.ExprNodeConverter; +import org.apache.hadoop.hive.ql.optimizer.ppr.PartitionPruner; +import org.apache.hadoop.hive.ql.parse.PrunedPartitionList; +import org.apache.hadoop.hive.ql.plan.ColStatistics; +import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; +import org.apache.hadoop.hive.ql.plan.Statistics; +import org.apache.hadoop.hive.ql.stats.StatsUtils; +import org.eigenbase.rel.RelNode; +import org.eigenbase.rel.TableAccessRel; +import org.eigenbase.relopt.RelOptAbstractTable; +import org.eigenbase.relopt.RelOptSchema; +import org.eigenbase.relopt.RelOptUtil.InputFinder; +import org.eigenbase.reltype.RelDataType; +import org.eigenbase.rex.RexNode; + +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.ImmutableMap.Builder; + +public class RelOptHiveTable extends RelOptAbstractTable { + private final Table m_hiveTblMetadata; + private final ImmutableList m_hiveNonPartitionCols; + private final ImmutableMap m_hiveNonPartitionColsMap; + private final ImmutableMap m_hivePartitionColsMap; + private final int m_noOfProjs; + final HiveConf m_hiveConf; + + private double m_rowCount = -1; + Map m_hiveColStatsMap = new HashMap(); + private Integer m_numPartitions; + PrunedPartitionList partitionList; + Map partitionCache; + AtomicInteger 
noColsMissingStats; + + protected static final Log LOG = LogFactory + .getLog(RelOptHiveTable.class + .getName()); + + public RelOptHiveTable(RelOptSchema optiqSchema, String name, RelDataType rowType, + Table hiveTblMetadata, List hiveNonPartitionCols, + List hivePartitionCols, HiveConf hconf, Map partitionCache, AtomicInteger noColsMissingStats) { + super(optiqSchema, name, rowType); + m_hiveTblMetadata = hiveTblMetadata; + m_hiveNonPartitionCols = ImmutableList.copyOf(hiveNonPartitionCols); + m_hiveNonPartitionColsMap = getColInfoMap(hiveNonPartitionCols, 0); + m_hivePartitionColsMap = getColInfoMap(hivePartitionCols, m_hiveNonPartitionColsMap.size()); + m_noOfProjs = hiveNonPartitionCols.size() + hivePartitionCols.size(); + m_hiveConf = hconf; + this.partitionCache = partitionCache; + this.noColsMissingStats = noColsMissingStats; + } + + private static ImmutableMap getColInfoMap(List hiveCols, + int startIndx) { + Builder bldr = ImmutableMap. builder(); + + int indx = startIndx; + for (ColumnInfo ci : hiveCols) { + bldr.put(indx, ci); + indx++; + } + + return bldr.build(); + } + + @Override + public boolean isKey(BitSet arg0) { + return false; + } + + @Override + public RelNode toRel(ToRelContext context) { + return new TableAccessRel(context.getCluster(), this); + } + + @Override + public T unwrap(Class arg0) { + return arg0.isInstance(this) ? arg0.cast(this) : null; + } + + @Override + public double getRowCount() { + if (m_rowCount == -1) { + if (null == partitionList) { + // we are here either unpartitioned table or partitioned table with no predicates + computePartitionList(m_hiveConf, null); + } + if (m_hiveTblMetadata.isPartitioned()) { + List rowCounts = StatsUtils.getBasicStatForPartitions( + m_hiveTblMetadata, partitionList.getNotDeniedPartns(), + StatsSetupConst.ROW_COUNT); + m_rowCount = StatsUtils.getSumIgnoreNegatives(rowCounts); + + } else { + m_rowCount = StatsUtils.getNumRows(m_hiveTblMetadata); + } + } + + return m_rowCount; + } + + public Table getHiveTableMD() { + return m_hiveTblMetadata; + } + + private String getColNamesForLogging(Set colLst) { + StringBuffer sb = new StringBuffer(); + boolean firstEntry = true; + for (String colName : colLst) { + if (firstEntry) { + sb.append(colName); + firstEntry = false; + } else { + sb.append(", " + colName); + } + } + return sb.toString(); + } + + public void computePartitionList(HiveConf conf, RexNode pruneNode) { + + try { + if (!m_hiveTblMetadata.isPartitioned() || pruneNode == null || InputFinder.bits(pruneNode).length() == 0 ) { + // there is no predicate on partitioning column, we need all partitions in this case. + partitionList = PartitionPruner.prune(m_hiveTblMetadata, null, conf, getName(), partitionCache); + return; + } + + // We have valid pruning expressions, only retrieve qualifying partitions + ExprNodeDesc pruneExpr = pruneNode.accept(new ExprNodeConverter(getName(), getRowType(), true)); + + partitionList = PartitionPruner.prune(m_hiveTblMetadata, pruneExpr, conf, getName(), partitionCache); + } catch (HiveException he) { + throw new RuntimeException(he); + } + } + + private void updateColStats(Set projIndxLst) { + List nonPartColNamesThatRqrStats = new ArrayList(); + List nonPartColIndxsThatRqrStats = new ArrayList(); + List partColNamesThatRqrStats = new ArrayList(); + List partColIndxsThatRqrStats = new ArrayList(); + Set colNamesFailedStats = new HashSet(); + + // 1. 
Separate required columns to Non Partition and Partition Cols + ColumnInfo tmp; + for (Integer pi : projIndxLst) { + if (m_hiveColStatsMap.get(pi) == null) { + if ((tmp = m_hiveNonPartitionColsMap.get(pi)) != null) { + nonPartColNamesThatRqrStats.add(tmp.getInternalName()); + nonPartColIndxsThatRqrStats.add(pi); + } else if ((tmp = m_hivePartitionColsMap.get(pi)) != null) { + partColNamesThatRqrStats.add(tmp.getInternalName()); + partColIndxsThatRqrStats.add(pi); + } else { + String logMsg = "Unable to find Column Index: " + pi + ", in " + + m_hiveTblMetadata.getCompleteName(); + LOG.error(logMsg); + throw new RuntimeException(logMsg); + } + } + } + + if (null == partitionList) { + // We could be here either because its an unpartitioned table or because + // there are no pruning predicates on a partitioned table. + computePartitionList(m_hiveConf, null); + } + + // 2. Obtain Col Stats for Non Partition Cols + if (nonPartColNamesThatRqrStats.size() > 0) { + List hiveColStats; + + if (!m_hiveTblMetadata.isPartitioned()) { + // 2.1 Handle the case for unpartitioned table. + hiveColStats = StatsUtils.getTableColumnStats(m_hiveTblMetadata, m_hiveNonPartitionCols, + nonPartColNamesThatRqrStats); + + // 2.1.1 Record Column Names that we needed stats for but couldn't + if (hiveColStats == null) { + colNamesFailedStats.addAll(nonPartColNamesThatRqrStats); + } else if (hiveColStats.size() != nonPartColNamesThatRqrStats.size()) { + Set setOfFiledCols = new HashSet(nonPartColNamesThatRqrStats); + + Set setOfObtainedColStats = new HashSet(); + for (ColStatistics cs : hiveColStats) { + setOfObtainedColStats.add(cs.getColumnName()); + } + setOfFiledCols.removeAll(setOfObtainedColStats); + + colNamesFailedStats.addAll(setOfFiledCols); + } + } else { + // 2.2 Obtain col stats for partitioned table. + try { + if (partitionList.getNotDeniedPartns().isEmpty()) { + // no need to make a metastore call + m_rowCount = 0; + hiveColStats = new ArrayList(); + for (String c : nonPartColNamesThatRqrStats) { + // add empty stats object for each column + hiveColStats.add(new ColStatistics(m_hiveTblMetadata.getTableName(), c, null)); + } + colNamesFailedStats.clear(); + } else { + Statistics stats = StatsUtils.collectStatistics(m_hiveConf, partitionList, + m_hiveTblMetadata, m_hiveNonPartitionCols, nonPartColNamesThatRqrStats, true, true); + m_rowCount = stats.getNumRows(); + hiveColStats = new ArrayList(); + for (String c : nonPartColNamesThatRqrStats) { + ColStatistics cs = stats.getColumnStatisticsFromColName(c); + if (cs != null) { + hiveColStats.add(cs); + } else { + colNamesFailedStats.add(c); + } + } + } + } catch (HiveException e) { + String logMsg = "Collecting stats failed."; + LOG.error(logMsg); + throw new RuntimeException(logMsg); + } + } + + if (hiveColStats != null && hiveColStats.size() == nonPartColNamesThatRqrStats.size()) { + for (int i = 0; i < hiveColStats.size(); i++) { + m_hiveColStatsMap.put(nonPartColIndxsThatRqrStats.get(i), hiveColStats.get(i)); + } + } + } + + // 3. 
Obtain Stats for Partition Cols + if (colNamesFailedStats.isEmpty() && !partColNamesThatRqrStats.isEmpty()) { + m_numPartitions = partitionList.getPartitions().size(); + ColStatistics cStats = null; + for (int i = 0; i < partColNamesThatRqrStats.size(); i++) { + cStats = new ColStatistics(m_hiveTblMetadata.getTableName(), + partColNamesThatRqrStats.get(i), m_hivePartitionColsMap.get( + partColIndxsThatRqrStats.get(i)).getTypeName()); + cStats.setCountDistint(getDistinctCount(partitionList.getPartitions(),partColNamesThatRqrStats.get(i))); + m_hiveColStatsMap.put(partColIndxsThatRqrStats.get(i), cStats); + } + } + + // 4. Warn user if we could get stats for required columns + if (!colNamesFailedStats.isEmpty()) { + String logMsg = "No Stats for " + m_hiveTblMetadata.getCompleteName() + ", Columns: " + + getColNamesForLogging(colNamesFailedStats); + LOG.error(logMsg); + noColsMissingStats.getAndAdd(colNamesFailedStats.size()); + throw new RuntimeException(logMsg); + } + } + + private int getDistinctCount(Set partitions, String partColName) { + Set distinctVals = new HashSet(partitions.size()); + for (Partition partition : partitions) { + distinctVals.add(partition.getSpec().get(partColName)); + } + return distinctVals.size(); + } + + public List getColStat(List projIndxLst) { + ImmutableList.Builder colStatsBldr = ImmutableList. builder(); + + if (projIndxLst != null) { + updateColStats(new HashSet(projIndxLst)); + for (Integer i : projIndxLst) { + colStatsBldr.add(m_hiveColStatsMap.get(i)); + } + } else { + List pILst = new ArrayList(); + for (Integer i = 0; i < m_noOfProjs; i++) { + pILst.add(i); + } + updateColStats(new HashSet(pILst)); + for (Integer pi : pILst) { + colStatsBldr.add(m_hiveColStatsMap.get(pi)); + } + } + + return colStatsBldr.build(); + } + + /* + * use to check if a set of columns are all partition columns. + * true only if: + * - all columns in BitSet are partition + * columns. 
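+ * e.g. (hypothetical): for a table with columns (c0, c1) partitioned by
+ * (ds, hr), the row schema is (c0, c1, ds, hr); a BitSet of {2, 3} returns
+ * true, while {1, 2} returns false because c1 is not a partition column.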
+ */ + public boolean containsPartitionColumnsOnly(BitSet cols) { + + for (int i = cols.nextSetBit(0); i >= 0; i++, i = cols.nextSetBit(i + 1)) { + if (!m_hivePartitionColsMap.containsKey(i)) { + return false; + } + } + return true; + } +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/TraitsUtil.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/TraitsUtil.java new file mode 100644 index 0000000..d4bd678 --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/TraitsUtil.java @@ -0,0 +1,52 @@ +package org.apache.hadoop.hive.ql.optimizer.optiq; + +import java.util.List; + +import org.apache.hadoop.hive.ql.optimizer.optiq.reloperators.HiveRel; +import org.eigenbase.rel.AggregateCall; +import org.eigenbase.rel.RelCollation; +import org.eigenbase.rel.RelCollationImpl; +import org.eigenbase.rel.RelNode; +import org.eigenbase.relopt.RelOptCluster; +import org.eigenbase.relopt.RelTraitSet; +import org.eigenbase.reltype.RelDataType; + +public class TraitsUtil { + + public static RelTraitSet getSelectTraitSet(RelOptCluster cluster, RelNode child) { + return cluster.traitSetOf(HiveRel.CONVENTION, RelCollationImpl.EMPTY); + } + + public static RelTraitSet getSortTraitSet(RelOptCluster cluster, RelTraitSet traitSet, + RelCollation collation) { + return traitSet.plus(collation); + } + + public static RelTraitSet getFilterTraitSet(RelOptCluster cluster, RelTraitSet traitSet, + RelNode child) { + return cluster.traitSetOf(HiveRel.CONVENTION, RelCollationImpl.EMPTY); + } + + public static RelTraitSet getLimitTraitSet(RelOptCluster cluster, RelTraitSet traitSet, + RelNode child) { + return cluster.traitSetOf(HiveRel.CONVENTION, RelCollationImpl.EMPTY); + } + + public static RelTraitSet getAggregateTraitSet(RelOptCluster cluster, RelTraitSet traitSet, + List gbCols, List aggCalls, RelNode child) { + return cluster.traitSetOf(HiveRel.CONVENTION, RelCollationImpl.EMPTY); + } + + public static RelTraitSet getTableScanTraitSet(RelOptCluster cluster, RelTraitSet traitSet, + RelOptHiveTable table, RelDataType rowtype) { + return cluster.traitSetOf(HiveRel.CONVENTION, RelCollationImpl.EMPTY); + } + + public static RelTraitSet getJoinTraitSet(RelOptCluster cluster, RelTraitSet traitSet) { + return cluster.traitSetOf(HiveRel.CONVENTION, RelCollationImpl.EMPTY); + } + + public static RelTraitSet getUnionTraitSet(RelOptCluster cluster, RelTraitSet traitSet) { + return cluster.traitSetOf(HiveRel.CONVENTION, RelCollationImpl.EMPTY); + } +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/cost/HiveCost.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/cost/HiveCost.java new file mode 100644 index 0000000..34a37e4 --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/cost/HiveCost.java @@ -0,0 +1,194 @@ +package org.apache.hadoop.hive.ql.optimizer.optiq.cost; + +import org.eigenbase.relopt.RelOptCost; +import org.eigenbase.relopt.RelOptCostFactory; +import org.eigenbase.relopt.RelOptUtil; + +// TODO: This should inherit from VolcanoCost and should just override isLE method. 
+public class HiveCost implements RelOptCost { + // ~ Static fields/initializers --------------------------------------------- + + public static final HiveCost INFINITY = new HiveCost(Double.POSITIVE_INFINITY, + Double.POSITIVE_INFINITY, + Double.POSITIVE_INFINITY) { + @Override + public String toString() { + return "{inf}"; + } + }; + + public static final HiveCost HUGE = new HiveCost(Double.MAX_VALUE, Double.MAX_VALUE, + Double.MAX_VALUE) { + @Override + public String toString() { + return "{huge}"; + } + }; + + public static final HiveCost ZERO = new HiveCost(0.0, 0.0, 0.0) { + @Override + public String toString() { + return "{0}"; + } + }; + + public static final HiveCost TINY = new HiveCost(1.0, 1.0, 0.0) { + @Override + public String toString() { + return "{tiny}"; + } + }; + + public static final RelOptCostFactory FACTORY = new Factory(); + + // ~ Instance fields -------------------------------------------------------- + + final double cpu; + final double io; + final double rowCount; + + // ~ Constructors ----------------------------------------------------------- + + HiveCost(double rowCount, double cpu, double io) { + assert rowCount >= 0d; + assert cpu >= 0d; + assert io >= 0d; + this.rowCount = rowCount; + this.cpu = cpu; + this.io = io; + } + + // ~ Methods ---------------------------------------------------------------- + + public double getCpu() { + return cpu; + } + + public boolean isInfinite() { + return (this == INFINITY) || (this.rowCount == Double.POSITIVE_INFINITY) + || (this.cpu == Double.POSITIVE_INFINITY) || (this.io == Double.POSITIVE_INFINITY); + } + + public double getIo() { + return io; + } + + // TODO: If two cost is equal, could we do any better than comparing + // cardinality (may be some other heuristics to break the tie) + public boolean isLe(RelOptCost other) { + return this == other || this.rowCount <= other.getRows(); + /* + * if (((this.dCpu + this.dIo) < (other.getCpu() + other.getIo())) || + * ((this.dCpu + this.dIo) == (other.getCpu() + other.getIo()) && this.dRows + * <= other.getRows())) { return true; } else { return false; } + */ + } + + public boolean isLt(RelOptCost other) { + return this.rowCount < other.getRows(); + /* + * return isLe(other) && !equals(other); + */ + } + + public double getRows() { + return rowCount; + } + + public boolean equals(RelOptCost other) { + return (this == other) || ((this.rowCount) == (other.getRows())); + + /* + * //TODO: should we consider cardinality as well? return (this == other) || + * ((this.dCpu + this.dIo) == (other.getCpu() + other.getIo())); + */ + } + + public boolean isEqWithEpsilon(RelOptCost other) { + return (this == other) || (Math.abs((this.rowCount) - (other.getRows())) < RelOptUtil.EPSILON); + /* + * return (this == other) || (Math.abs((this.dCpu + this.dIo) - + * (other.getCpu() + other.getIo())) < RelOptUtil.EPSILON); + */ + } + + public RelOptCost minus(RelOptCost other) { + if (this == INFINITY) { + return this; + } + + return new HiveCost(this.rowCount - other.getRows(), this.cpu - other.getCpu(), this.io + - other.getIo()); + } + + public RelOptCost multiplyBy(double factor) { + if (this == INFINITY) { + return this; + } + return new HiveCost(rowCount * factor, cpu * factor, io * factor); + } + + public double divideBy(RelOptCost cost) { + // Compute the geometric average of the ratios of all of the factors + // which are non-zero and finite. 
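+ // Illustrative example: dividing {8 rows, 4 cpu, 2 io} by {2 rows, 1 cpu,
+ // 1 io} multiplies the ratios 4, 4 and 2 into d = 32 with n = 3, so the
+ // result is Math.pow(32, 1.0 / 3), roughly 3.17.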
+ double d = 1; + double n = 0; + if ((this.rowCount != 0) && !Double.isInfinite(this.rowCount) && (cost.getRows() != 0) + && !Double.isInfinite(cost.getRows())) { + d *= this.rowCount / cost.getRows(); + ++n; + } + if ((this.cpu != 0) && !Double.isInfinite(this.cpu) && (cost.getCpu() != 0) + && !Double.isInfinite(cost.getCpu())) { + d *= this.cpu / cost.getCpu(); + ++n; + } + if ((this.io != 0) && !Double.isInfinite(this.io) && (cost.getIo() != 0) + && !Double.isInfinite(cost.getIo())) { + d *= this.io / cost.getIo(); + ++n; + } + if (n == 0) { + return 1.0; + } + return Math.pow(d, 1 / n); + } + + public RelOptCost plus(RelOptCost other) { + if ((this == INFINITY) || (other.isInfinite())) { + return INFINITY; + } + return new HiveCost(this.rowCount + other.getRows(), this.cpu + other.getCpu(), this.io + + other.getIo()); + } + + @Override + public String toString() { + return "{" + rowCount + " rows, " + cpu + " cpu, " + io + " io}"; + } + + private static class Factory implements RelOptCostFactory { + private Factory() { + } + + public RelOptCost makeCost(double rowCount, double cpu, double io) { + return new HiveCost(rowCount, cpu, io); + } + + public RelOptCost makeHugeCost() { + return HUGE; + } + + public HiveCost makeInfiniteCost() { + return INFINITY; + } + + public HiveCost makeTinyCost() { + return TINY; + } + + public HiveCost makeZeroCost() { + return ZERO; + } + } +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/cost/HiveCostUtil.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/cost/HiveCostUtil.java new file mode 100644 index 0000000..926bca5 --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/cost/HiveCostUtil.java @@ -0,0 +1,23 @@ +package org.apache.hadoop.hive.ql.optimizer.optiq.cost; + +import org.apache.hadoop.hive.ql.optimizer.optiq.reloperators.HiveRel; +import org.apache.hadoop.hive.ql.optimizer.optiq.reloperators.HiveTableScanRel; +import org.eigenbase.relopt.RelOptCost; + +public class HiveCostUtil { + private static final double cpuCostInNanoSec = 1.0; + private static final double netCostInNanoSec = 150 * cpuCostInNanoSec; + private static final double localFSWriteCostInNanoSec = 4 * netCostInNanoSec; + private static final double localFSReadCostInNanoSec = 4 * netCostInNanoSec; + private static final double hDFSWriteCostInNanoSec = 10 * localFSWriteCostInNanoSec; + private static final double hDFSReadCostInNanoSec = 1.5 * localFSReadCostInNanoSec; + + public static RelOptCost computCardinalityBasedCost(HiveRel hr) { + return new HiveCost(hr.getRows(), 0, 0); + } + + public static HiveCost computeCost(HiveTableScanRel t) { + double cardinality = t.getRows(); + return new HiveCost(cardinality, 0, hDFSWriteCostInNanoSec * cardinality * 0); + } +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/cost/HiveVolcanoPlanner.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/cost/HiveVolcanoPlanner.java new file mode 100644 index 0000000..15596bc --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/cost/HiveVolcanoPlanner.java @@ -0,0 +1,31 @@ +package org.apache.hadoop.hive.ql.optimizer.optiq.cost; + +import org.eigenbase.rel.RelCollationTraitDef; +import org.eigenbase.relopt.ConventionTraitDef; +import org.eigenbase.relopt.RelOptPlanner; +import org.eigenbase.relopt.volcano.VolcanoPlanner; + +/** + * Refinement of {@link org.eigenbase.relopt.volcano.VolcanoPlanner} for Hive. + * + *
+ * It uses {@link org.apache.hadoop.hive.ql.optimizer.optiq.cost.HiveCost} as + * its cost model. + */ +public class HiveVolcanoPlanner extends VolcanoPlanner { + private static final boolean ENABLE_COLLATION_TRAIT = true; + + /** Creates a HiveVolcanoPlanner. */ + public HiveVolcanoPlanner() { + super(HiveCost.FACTORY, null); + } + + public static RelOptPlanner createPlanner() { + final VolcanoPlanner planner = new HiveVolcanoPlanner(); + planner.addRelTraitDef(ConventionTraitDef.INSTANCE); + if (ENABLE_COLLATION_TRAIT) { + planner.addRelTraitDef(RelCollationTraitDef.INSTANCE); + } + return planner; + } +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/HiveAggregateRel.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/HiveAggregateRel.java new file mode 100644 index 0000000..c81fd8a --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/HiveAggregateRel.java @@ -0,0 +1,70 @@ +package org.apache.hadoop.hive.ql.optimizer.optiq.reloperators; + +import java.util.BitSet; +import java.util.List; + +import net.hydromatic.optiq.util.BitSets; + +import org.apache.hadoop.hive.ql.optimizer.optiq.TraitsUtil; +import org.apache.hadoop.hive.ql.optimizer.optiq.cost.HiveCost; +import org.eigenbase.rel.AggregateCall; +import org.eigenbase.rel.AggregateRelBase; +import org.eigenbase.rel.InvalidRelException; +import org.eigenbase.rel.RelFactories.AggregateFactory; +import org.eigenbase.rel.RelNode; +import org.eigenbase.rel.metadata.RelMetadataQuery; +import org.eigenbase.relopt.RelOptCluster; +import org.eigenbase.relopt.RelOptCost; +import org.eigenbase.relopt.RelOptPlanner; +import org.eigenbase.relopt.RelTraitSet; + +public class HiveAggregateRel extends AggregateRelBase implements HiveRel { + + public static final HiveAggRelFactory HIVE_AGGR_REL_FACTORY = new HiveAggRelFactory(); + + public HiveAggregateRel(RelOptCluster cluster, RelTraitSet traitSet, RelNode child, + BitSet groupSet, List aggCalls) throws InvalidRelException { + super(cluster, TraitsUtil.getAggregateTraitSet(cluster, traitSet, BitSets.toList(groupSet), + aggCalls, child), child, groupSet, aggCalls); + } + + @Override + public AggregateRelBase copy(RelTraitSet traitSet, RelNode input, BitSet groupSet, + List aggCalls) { + try { + return new HiveAggregateRel(getCluster(), traitSet, input, groupSet, aggCalls); + } catch (InvalidRelException e) { + // Semantic error not possible. Must be a bug. Convert to + // internal error. 
+ throw new AssertionError(e); + } + } + + @Override + public void implement(Implementor implementor) { + } + + @Override + public RelOptCost computeSelfCost(RelOptPlanner planner) { + return HiveCost.FACTORY.makeZeroCost(); + } + + @Override + public double getRows() { + return RelMetadataQuery.getDistinctRowCount(this, groupSet, getCluster().getRexBuilder() + .makeLiteral(true)); + } + + private static class HiveAggRelFactory implements AggregateFactory { + + @Override + public RelNode createAggregate(RelNode child, BitSet groupSet, + List aggCalls) { + try { + return new HiveAggregateRel(child.getCluster(), child.getTraitSet(), child, groupSet, aggCalls); + } catch (InvalidRelException e) { + throw new RuntimeException(e); + } + } + } +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/HiveFilterRel.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/HiveFilterRel.java new file mode 100644 index 0000000..ebf420d --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/HiveFilterRel.java @@ -0,0 +1,53 @@ +package org.apache.hadoop.hive.ql.optimizer.optiq.reloperators; + +import java.util.List; + +import org.apache.hadoop.hive.ql.optimizer.optiq.TraitsUtil; +import org.apache.hadoop.hive.ql.optimizer.optiq.cost.HiveCost; +import org.eigenbase.rel.FilterRelBase; +import org.eigenbase.rel.RelFactories.FilterFactory; +import org.eigenbase.rel.RelNode; +import org.eigenbase.relopt.RelOptCluster; +import org.eigenbase.relopt.RelOptCost; +import org.eigenbase.relopt.RelOptPlanner; +import org.eigenbase.relopt.RelTraitSet; +import org.eigenbase.rex.RexNode; + +public class HiveFilterRel extends FilterRelBase implements HiveRel { + + public static final FilterFactory DEFAULT_FILTER_FACTORY = new HiveFilterFactoryImpl(); + + public HiveFilterRel(RelOptCluster cluster, RelTraitSet traits, RelNode child, RexNode condition) { + super(cluster, TraitsUtil.getFilterTraitSet(cluster, traits, child), child, condition); + } + + @Override + public FilterRelBase copy(RelTraitSet traitSet, RelNode input, RexNode condition) { + assert traitSet.containsIfApplicable(HiveRel.CONVENTION); + return new HiveFilterRel(getCluster(), traitSet, input, getCondition()); + } + + @Override + public void implement(Implementor implementor) { + } + + @Override + public RelOptCost computeSelfCost(RelOptPlanner planner) { + return HiveCost.FACTORY.makeZeroCost(); + } + + /** + * Implementation of {@link FilterFactory} that returns + * {@link org.apache.hadoop.hive.ql.optimizer.optiq.reloperators.HiveFilterRel} + * . 
+ */ + private static class HiveFilterFactoryImpl implements FilterFactory { + @Override + public RelNode createFilter(RelNode child, RexNode condition) { + RelOptCluster cluster = child.getCluster(); + HiveFilterRel filter = new HiveFilterRel(cluster, TraitsUtil.getFilterTraitSet(cluster, null, + child), child, condition); + return filter; + } + } +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/HiveJoinRel.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/HiveJoinRel.java new file mode 100644 index 0000000..6f642b2 --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/HiveJoinRel.java @@ -0,0 +1,138 @@ +package org.apache.hadoop.hive.ql.optimizer.optiq.reloperators; + +import java.util.Collections; +import java.util.Set; + +import org.apache.hadoop.hive.ql.optimizer.optiq.TraitsUtil; +import org.apache.hadoop.hive.ql.optimizer.optiq.cost.HiveCost; +import org.eigenbase.rel.InvalidRelException; +import org.eigenbase.rel.JoinRelBase; +import org.eigenbase.rel.JoinRelType; +import org.eigenbase.rel.RelFactories.JoinFactory; +import org.eigenbase.rel.RelNode; +import org.eigenbase.rel.metadata.RelMetadataQuery; +import org.eigenbase.relopt.RelOptCluster; +import org.eigenbase.relopt.RelOptCost; +import org.eigenbase.relopt.RelOptPlanner; +import org.eigenbase.relopt.RelTraitSet; +import org.eigenbase.reltype.RelDataType; +import org.eigenbase.reltype.RelDataTypeField; +import org.eigenbase.rex.RexNode; + +//TODO: Should we convert MultiJoin to be a child of HiveJoinRelBase +public class HiveJoinRel extends JoinRelBase implements HiveRel { + // NOTE: COMMON_JOIN & SMB_JOIN are Sort Merge Join (in case of COMMON_JOIN + // each parallel computation handles multiple splits where as in case of SMB + // each parallel computation handles one bucket). 
MAP_JOIN and BUCKET_JOIN is + // hash joins where MAP_JOIN keeps the whole data set of non streaming tables + // in memory where as BUCKET_JOIN keeps only the b + public enum JoinAlgorithm { + NONE, COMMON_JOIN, MAP_JOIN, BUCKET_JOIN, SMB_JOIN + } + + public enum MapJoinStreamingRelation { + NONE, LEFT_RELATION, RIGHT_RELATION + } + + public static final JoinFactory HIVE_JOIN_FACTORY = new HiveJoinFactoryImpl(); + + private final boolean m_leftSemiJoin; + private final JoinAlgorithm m_joinAlgorithm; + private final MapJoinStreamingRelation m_mapJoinStreamingSide = MapJoinStreamingRelation.NONE; + + public static HiveJoinRel getJoin(RelOptCluster cluster, RelNode left, RelNode right, + RexNode condition, JoinRelType joinType, boolean leftSemiJoin) { + try { + Set variablesStopped = Collections.emptySet(); + return new HiveJoinRel(cluster, null, left, right, condition, joinType, variablesStopped, + JoinAlgorithm.NONE, null, leftSemiJoin); + } catch (InvalidRelException e) { + throw new RuntimeException(e); + } + } + + protected HiveJoinRel(RelOptCluster cluster, RelTraitSet traits, RelNode left, RelNode right, + RexNode condition, JoinRelType joinType, Set variablesStopped, + JoinAlgorithm joinAlgo, MapJoinStreamingRelation streamingSideForMapJoin, boolean leftSemiJoin) + throws InvalidRelException { + super(cluster, TraitsUtil.getJoinTraitSet(cluster, traits), left, right, condition, joinType, + variablesStopped); + this.m_joinAlgorithm = joinAlgo; + m_leftSemiJoin = leftSemiJoin; + } + + @Override + public void implement(Implementor implementor) { + } + + @Override + public final HiveJoinRel copy(RelTraitSet traitSet, RexNode conditionExpr, RelNode left, + RelNode right, JoinRelType joinType, boolean semiJoinDone) { + try { + Set variablesStopped = Collections.emptySet(); + return new HiveJoinRel(getCluster(), traitSet, left, right, conditionExpr, joinType, + variablesStopped, JoinAlgorithm.NONE, null, m_leftSemiJoin); + } catch (InvalidRelException e) { + // Semantic error not possible. Must be a bug. Convert to + // internal error. + throw new AssertionError(e); + } + } + + public JoinAlgorithm getJoinAlgorithm() { + return m_joinAlgorithm; + } + + public boolean isLeftSemiJoin() { + return m_leftSemiJoin; + } + + /** + * Model cost of join as size of Inputs. + */ + @Override + public RelOptCost computeSelfCost(RelOptPlanner planner) { + double leftRCount = RelMetadataQuery.getRowCount(getLeft()); + double rightRCount = RelMetadataQuery.getRowCount(getRight()); + return HiveCost.FACTORY.makeCost(leftRCount + rightRCount, 0.0, 0.0); + } + + /** + * @return returns rowtype representing only the left join input + */ + @Override + public RelDataType deriveRowType() { + if (m_leftSemiJoin) { + return deriveJoinRowType(left.getRowType(), null, JoinRelType.INNER, + getCluster().getTypeFactory(), null, + Collections. emptyList()); + } + return super.deriveRowType(); + } + + private static class HiveJoinFactoryImpl implements JoinFactory { + /** + * Creates a join. 
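+ * <p>
+ * For instance (illustrative), createJoin delegates to
+ * HiveJoinRel.getJoin(left.getCluster(), left, right, condition, joinType,
+ * false), so the resulting join always starts out with JoinAlgorithm.NONE
+ * and is never a left semi join.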
+ * + * @param left + * Left input + * @param right + * Right input + * @param condition + * Join condition + * @param joinType + * Join type + * @param variablesStopped + * Set of names of variables which are set by the LHS and used by + * the RHS and are not available to nodes above this JoinRel in the + * tree + * @param semiJoinDone + * Whether this join has been translated to a semi-join + */ + @Override + public RelNode createJoin(RelNode left, RelNode right, RexNode condition, JoinRelType joinType, + Set variablesStopped, boolean semiJoinDone) { + return getJoin(left.getCluster(), left, right, condition, joinType, false); + } + } +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/HiveLimitRel.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/HiveLimitRel.java new file mode 100644 index 0000000..bf37a7b --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/HiveLimitRel.java @@ -0,0 +1,40 @@ +package org.apache.hadoop.hive.ql.optimizer.optiq.reloperators; + +import java.util.List; + +import org.apache.hadoop.hive.ql.optimizer.optiq.TraitsUtil; +import org.apache.hadoop.hive.ql.optimizer.optiq.cost.HiveCost; +import org.eigenbase.rel.RelNode; +import org.eigenbase.rel.SingleRel; +import org.eigenbase.relopt.RelOptCluster; +import org.eigenbase.relopt.RelOptCost; +import org.eigenbase.relopt.RelOptPlanner; +import org.eigenbase.relopt.RelTraitSet; +import org.eigenbase.rex.RexNode; + +public class HiveLimitRel extends SingleRel implements HiveRel { + private final RexNode offset; + private final RexNode fetch; + + HiveLimitRel(RelOptCluster cluster, RelTraitSet traitSet, RelNode child, RexNode offset, + RexNode fetch) { + super(cluster, TraitsUtil.getLimitTraitSet(cluster, traitSet, child), child); + this.offset = offset; + this.fetch = fetch; + assert getConvention() instanceof HiveRel; + assert getConvention() == child.getConvention(); + } + + @Override + public HiveLimitRel copy(RelTraitSet traitSet, List newInputs) { + return new HiveLimitRel(getCluster(), traitSet, sole(newInputs), offset, fetch); + } + + public void implement(Implementor implementor) { + } + + @Override + public RelOptCost computeSelfCost(RelOptPlanner planner) { + return HiveCost.FACTORY.makeZeroCost(); + } +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/HiveProjectRel.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/HiveProjectRel.java new file mode 100644 index 0000000..a60af2e --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/HiveProjectRel.java @@ -0,0 +1,168 @@ +package org.apache.hadoop.hive.ql.optimizer.optiq.reloperators; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +import com.google.common.collect.ImmutableList; + +import org.apache.hadoop.hive.ql.optimizer.optiq.HiveOptiqUtil; +import org.apache.hadoop.hive.ql.optimizer.optiq.TraitsUtil; +import org.apache.hadoop.hive.ql.optimizer.optiq.cost.HiveCost; +import org.eigenbase.rel.ProjectRelBase; +import org.eigenbase.rel.RelCollation; +import org.eigenbase.rel.RelFactories.ProjectFactory; +import org.eigenbase.rel.RelNode; +import org.eigenbase.relopt.RelOptCluster; +import org.eigenbase.relopt.RelOptCost; +import org.eigenbase.relopt.RelOptPlanner; +import org.eigenbase.relopt.RelOptRule; +import org.eigenbase.relopt.RelTraitSet; +import org.eigenbase.reltype.RelDataType; +import org.eigenbase.reltype.RelDataTypeField; +import 
org.eigenbase.rex.RexBuilder; +import org.eigenbase.rex.RexNode; +import org.eigenbase.rex.RexUtil; +import org.eigenbase.util.mapping.Mapping; +import org.eigenbase.util.mapping.MappingType; + +public class HiveProjectRel extends ProjectRelBase implements HiveRel { + + public static final ProjectFactory DEFAULT_PROJECT_FACTORY = new HiveProjectFactoryImpl(); + + private final List m_virtualCols; + + /** + * Creates a HiveProjectRel. + * + * @param cluster + * Cluster this relational expression belongs to + * @param child + * input relational expression + * @param exps + * List of expressions for the input columns + * @param rowType + * output row type + * @param flags + * values as in {@link ProjectRelBase.Flags} + */ + public HiveProjectRel(RelOptCluster cluster, RelTraitSet traitSet, RelNode child, + List exps, RelDataType rowType, int flags) { + super(cluster, traitSet, child, exps, rowType, flags); + m_virtualCols = ImmutableList.copyOf(HiveOptiqUtil.getVirtualCols(exps)); + } + + /** + * Creates a HiveProjectRel with no sort keys. + * + * @param child + * input relational expression + * @param exps + * set of expressions for the input columns + * @param fieldNames + * aliases of the expressions + */ + public static HiveProjectRel create(RelNode child, List exps, List fieldNames) { + RelOptCluster cluster = child.getCluster(); + RelDataType rowType = RexUtil.createStructType(cluster.getTypeFactory(), exps, fieldNames); + return create(cluster, child, exps, rowType, Collections. emptyList()); + } + + /** + * Creates a HiveProjectRel. + */ + public static HiveProjectRel create(RelOptCluster cluster, RelNode child, List exps, + RelDataType rowType, final List collationList) { + RelTraitSet traitSet = TraitsUtil.getSelectTraitSet(cluster, child); + return new HiveProjectRel(cluster, traitSet, child, exps, rowType, Flags.BOXED); + } + + /** + * Creates a relational expression which projects the output fields of a + * relational expression according to a partial mapping. + * + *
+ * A partial mapping is weaker than a permutation: every target has one
+ * source, but a source may have 0, 1 or more than one target. Usually the
+ * result will have fewer fields than the source, unless some source fields
+ * are projected multiple times.
+ *
+ * This method could optimize the result as {@link #permute} does, but does + * not at present. + * + * @param rel + * Relational expression + * @param mapping + * Mapping from source fields to target fields. The mapping type must + * obey the constraints {@link MappingType#isMandatorySource()} and + * {@link MappingType#isSingleSource()}, as does + * {@link MappingType#INVERSE_FUNCTION}. + * @param fieldNames + * Field names; if null, or if a particular entry is null, the name + * of the permuted field is used + * @return relational expression which projects a subset of the input fields + */ + public static RelNode projectMapping(RelNode rel, Mapping mapping, List fieldNames) { + assert mapping.getMappingType().isSingleSource(); + assert mapping.getMappingType().isMandatorySource(); + + if (mapping.isIdentity()) { + return rel; + } + + final List outputNameList = new ArrayList(); + final List outputProjList = new ArrayList(); + final List fields = rel.getRowType().getFieldList(); + final RexBuilder rexBuilder = rel.getCluster().getRexBuilder(); + + for (int i = 0; i < mapping.getTargetCount(); i++) { + int source = mapping.getSource(i); + final RelDataTypeField sourceField = fields.get(source); + outputNameList + .add(((fieldNames == null) || (fieldNames.size() <= i) || (fieldNames.get(i) == null)) ? sourceField + .getName() : fieldNames.get(i)); + outputProjList.add(rexBuilder.makeInputRef(rel, source)); + } + + return create(rel, outputProjList, outputNameList); + } + + @Override + public ProjectRelBase copy(RelTraitSet traitSet, RelNode input, List exps, + RelDataType rowType) { + assert traitSet.containsIfApplicable(HiveRel.CONVENTION); + return new HiveProjectRel(getCluster(), traitSet, input, exps, rowType, getFlags()); + } + + @Override + public RelOptCost computeSelfCost(RelOptPlanner planner) { + return HiveCost.FACTORY.makeZeroCost(); + } + + @Override + public void implement(Implementor implementor) { + } + + public List getVirtualCols() { + return m_virtualCols; + } + + /** + * Implementation of {@link ProjectFactory} that returns + * {@link org.apache.hadoop.hive.ql.optimizer.optiq.reloperators.HiveProjectRel} + * . + */ + private static class HiveProjectFactoryImpl implements ProjectFactory { + + @Override + public RelNode createProject(RelNode child, + List childExprs, List fieldNames) { + RelNode project = HiveProjectRel.create(child, childExprs, fieldNames); + + // Make sure extra traits are carried over from the original rel + project = RelOptRule.convert(project, child.getTraitSet()); + return project; + } + } +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/HiveRel.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/HiveRel.java new file mode 100644 index 0000000..6f3f1d8 --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/HiveRel.java @@ -0,0 +1,19 @@ +package org.apache.hadoop.hive.ql.optimizer.optiq.reloperators; + +import org.eigenbase.rel.RelNode; +import org.eigenbase.relopt.Convention; + +public interface HiveRel extends RelNode { + void implement(Implementor implementor); + + /** Calling convention for relational operations that occur in Hive. 
*/ + final Convention CONVENTION = new Convention.Impl("HIVE", HiveRel.class); + + class Implementor { + + public void visitChild(int ordinal, RelNode input) { + assert ordinal == 0; + ((HiveRel) input).implement(this); + } + } +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/HiveSortRel.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/HiveSortRel.java new file mode 100644 index 0000000..3bd7889 --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/HiveSortRel.java @@ -0,0 +1,49 @@ +package org.apache.hadoop.hive.ql.optimizer.optiq.reloperators; + +import org.apache.hadoop.hive.ql.optimizer.optiq.TraitsUtil; +import org.eigenbase.rel.RelCollation; +import org.eigenbase.rel.RelFactories; +import org.eigenbase.rel.RelNode; +import org.eigenbase.rel.SortRel; +import org.eigenbase.relopt.RelOptCluster; +import org.eigenbase.relopt.RelTraitSet; +import org.eigenbase.rex.RexNode; + +public class HiveSortRel extends SortRel implements HiveRel { + + public static final HiveSortRelFactory HIVE_SORT_REL_FACTORY = new HiveSortRelFactory(); + + public HiveSortRel(RelOptCluster cluster, RelTraitSet traitSet, RelNode child, + RelCollation collation, RexNode offset, RexNode fetch) { + super(cluster, TraitsUtil.getSortTraitSet(cluster, traitSet, collation), child, collation, + offset, fetch); + + assert getConvention() == child.getConvention(); + } + + @Override + public HiveSortRel copy(RelTraitSet traitSet, RelNode newInput, RelCollation newCollation, + RexNode offset, RexNode fetch) { + // TODO: can we blindly copy sort trait? What if inputs changed and we + // are now sorting by different cols + RelCollation canonizedCollation = traitSet.canonize(newCollation); + return new HiveSortRel(getCluster(), traitSet, newInput, canonizedCollation, offset, fetch); + } + + public RexNode getFetchExpr() { + return fetch; + } + + @Override + public void implement(Implementor implementor) { + } + + private static class HiveSortRelFactory implements RelFactories.SortFactory { + + @Override + public RelNode createSort(RelTraitSet traits, RelNode child, + RelCollation collation, RexNode offset, RexNode fetch) { + return new HiveSortRel(child.getCluster(), traits, child, collation, offset, fetch); + } + } +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/HiveTableScanRel.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/HiveTableScanRel.java new file mode 100644 index 0000000..4fe1735 --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/HiveTableScanRel.java @@ -0,0 +1,76 @@ +package org.apache.hadoop.hive.ql.optimizer.optiq.reloperators; + +import java.util.List; + +import org.apache.hadoop.hive.ql.optimizer.optiq.RelOptHiveTable; +import org.apache.hadoop.hive.ql.optimizer.optiq.TraitsUtil; +import org.apache.hadoop.hive.ql.optimizer.optiq.cost.HiveCost; +import org.apache.hadoop.hive.ql.plan.ColStatistics; +import org.eigenbase.rel.RelNode; +import org.eigenbase.rel.TableAccessRelBase; +import org.eigenbase.relopt.RelOptCluster; +import org.eigenbase.relopt.RelOptCost; +import org.eigenbase.relopt.RelOptPlanner; +import org.eigenbase.relopt.RelTraitSet; +import org.eigenbase.reltype.RelDataType; + + +/** + * Relational expression representing a scan of a HiveDB collection. + * + *
+ * Additional operations might be applied, using the "find" or "aggregate"
+ * methods.
+ *
+ */ +public class HiveTableScanRel extends TableAccessRelBase implements HiveRel { + private List m_hiveColStat; + + /** + * Creates a HiveTableScan. + * + * @param cluster + * Cluster + * @param traitSet + * Traits + * @param table + * Table + * @param table + * HiveDB table + */ + public HiveTableScanRel(RelOptCluster cluster, RelTraitSet traitSet, RelOptHiveTable table, + RelDataType rowtype) { + super(cluster, TraitsUtil.getTableScanTraitSet(cluster, traitSet, table, rowtype), table); + assert getConvention() == HiveRel.CONVENTION; + } + + @Override + public RelNode copy(RelTraitSet traitSet, List inputs) { + assert inputs.isEmpty(); + return this; + } + + @Override + public RelOptCost computeSelfCost(RelOptPlanner planner) { + return HiveCost.FACTORY.makeZeroCost(); + } + + @Override + public void register(RelOptPlanner planner) { + + } + + @Override + public void implement(Implementor implementor) { + + } + + @Override + public double getRows() { + return ((RelOptHiveTable) table).getRowCount(); + } + + public List getColStat(List projIndxLst) { + return ((RelOptHiveTable) table).getColStat(projIndxLst); + } +} \ No newline at end of file diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/HiveUnionRel.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/HiveUnionRel.java new file mode 100644 index 0000000..ccd52b0 --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/HiveUnionRel.java @@ -0,0 +1,40 @@ +package org.apache.hadoop.hive.ql.optimizer.optiq.reloperators; + +import java.util.List; + +import org.apache.hadoop.hive.ql.optimizer.optiq.reloperators.HiveRel.Implementor; +import org.eigenbase.rel.RelFactories; +import org.eigenbase.rel.RelNode; +import org.eigenbase.rel.SetOpRel; +import org.eigenbase.rel.UnionRelBase; +import org.eigenbase.relopt.RelOptCluster; +import org.eigenbase.relopt.RelTraitSet; +import org.eigenbase.sql.SqlKind; + +public class HiveUnionRel extends UnionRelBase { + + public static final HiveUnionRelFactory UNION_REL_FACTORY = new HiveUnionRelFactory(); + + public HiveUnionRel(RelOptCluster cluster, RelTraitSet traits, List inputs) { + super(cluster, traits, inputs, true); + } + + @Override + public SetOpRel copy(RelTraitSet traitSet, List inputs, boolean all) { + return new HiveUnionRel(this.getCluster(), traitSet, inputs); + } + + public void implement(Implementor implementor) { + } + + private static class HiveUnionRelFactory implements RelFactories.SetOpFactory { + + @Override + public RelNode createSetOp(SqlKind kind, List inputs, boolean all) { + if (kind != SqlKind.UNION) { + throw new IllegalStateException("Expected to get Set operator of type Union. 
Found : " + kind); + } + return new HiveUnionRel(inputs.get(0).getCluster(), inputs.get(0).getTraitSet(), inputs); + } + } +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/rules/HiveMergeProjectRule.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/rules/HiveMergeProjectRule.java new file mode 100644 index 0000000..a34b532 --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/rules/HiveMergeProjectRule.java @@ -0,0 +1,12 @@ +package org.apache.hadoop.hive.ql.optimizer.optiq.rules; + +import org.apache.hadoop.hive.ql.optimizer.optiq.reloperators.HiveProjectRel; +import org.eigenbase.rel.rules.MergeProjectRule; + +public class HiveMergeProjectRule extends MergeProjectRule { + public static final HiveMergeProjectRule INSTANCE = new HiveMergeProjectRule(); + + public HiveMergeProjectRule() { + super(true, HiveProjectRel.DEFAULT_PROJECT_FACTORY); + } +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/rules/HivePartitionPrunerRule.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/rules/HivePartitionPrunerRule.java new file mode 100644 index 0000000..6f06c6a --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/rules/HivePartitionPrunerRule.java @@ -0,0 +1,60 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.ql.optimizer.optiq.rules; + +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.ql.optimizer.optiq.RelOptHiveTable; +import org.apache.hadoop.hive.ql.optimizer.optiq.reloperators.HiveFilterRel; +import org.apache.hadoop.hive.ql.optimizer.optiq.reloperators.HiveTableScanRel; +import org.eigenbase.rel.FilterRelBase; +import org.eigenbase.relopt.RelOptRule; +import org.eigenbase.relopt.RelOptRuleCall; +import org.eigenbase.rex.RexNode; +import org.eigenbase.util.Pair; + +public class HivePartitionPrunerRule extends RelOptRule { + + HiveConf conf; + + public HivePartitionPrunerRule(HiveConf conf) { + super(operand(HiveFilterRel.class, operand(HiveTableScanRel.class, none()))); + this.conf = conf; + } + + @Override + public void onMatch(RelOptRuleCall call) { + HiveFilterRel filter = call.rel(0); + HiveTableScanRel tScan = call.rel(1); + perform(call, filter, tScan); + } + + protected void perform(RelOptRuleCall call, FilterRelBase filter, + HiveTableScanRel tScan) { + + RelOptHiveTable hiveTable = (RelOptHiveTable) tScan.getTable(); + RexNode predicate = filter.getCondition(); + + Pair predicates = PartitionPruner + .extractPartitionPredicates(filter.getCluster(), hiveTable, predicate); + RexNode partColExpr = predicates.left; + RexNode remainingExpr = predicates.right; + remainingExpr = remainingExpr == null ? 
filter.getCluster().getRexBuilder() + .makeLiteral(true) : remainingExpr; + hiveTable.computePartitionList(conf, partColExpr); + } +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/rules/HivePullUpProjectsAboveJoinRule.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/rules/HivePullUpProjectsAboveJoinRule.java new file mode 100644 index 0000000..a48112e --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/rules/HivePullUpProjectsAboveJoinRule.java @@ -0,0 +1,44 @@ +package org.apache.hadoop.hive.ql.optimizer.optiq.rules; + +import org.apache.hadoop.hive.ql.optimizer.optiq.reloperators.HiveJoinRel; +import org.apache.hadoop.hive.ql.optimizer.optiq.reloperators.HiveProjectRel; +import org.eigenbase.rel.ProjectRelBase; +import org.eigenbase.rel.RelNode; +import org.eigenbase.rel.rules.PullUpProjectsAboveJoinRule; +import org.eigenbase.relopt.RelOptRuleOperand; + +public class HivePullUpProjectsAboveJoinRule extends PullUpProjectsAboveJoinRule { + + public static final HivePullUpProjectsAboveJoinRule BOTH_PROJECT = new HivePullUpProjectsAboveJoinRule( + operand( + HiveJoinRel.class, + operand( + ProjectRelBase.class, + any()), + operand( + ProjectRelBase.class, + any())), + "HivePullUpProjectsAboveJoinRule: with two HiveProjectRel children"); + + public static final HivePullUpProjectsAboveJoinRule LEFT_PROJECT = new HivePullUpProjectsAboveJoinRule( + operand( + HiveJoinRel.class, + some(operand( + ProjectRelBase.class, + any()))), + "HivePullUpProjectsAboveJoinRule: with HiveProjectRel on left"); + + public static final HivePullUpProjectsAboveJoinRule RIGHT_PROJECT = new HivePullUpProjectsAboveJoinRule( + operand( + HiveJoinRel.class, + operand(RelNode.class, + any()), + operand( + ProjectRelBase.class, + any())), + "HivePullUpProjectsAboveJoinRule: with HiveProjectRel on right"); + + public HivePullUpProjectsAboveJoinRule(RelOptRuleOperand operand, String description) { + super(operand, description, HiveProjectRel.DEFAULT_PROJECT_FACTORY); + } +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/rules/HivePushFilterPastJoinRule.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/rules/HivePushFilterPastJoinRule.java new file mode 100644 index 0000000..f8d1ac1 --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/rules/HivePushFilterPastJoinRule.java @@ -0,0 +1,276 @@ +package org.apache.hadoop.hive.ql.optimizer.optiq.rules; + +import java.util.ArrayList; +import java.util.BitSet; +import java.util.List; +import java.util.ListIterator; + +import org.apache.hadoop.hive.ql.optimizer.optiq.reloperators.HiveFilterRel; +import org.apache.hadoop.hive.ql.optimizer.optiq.reloperators.HiveJoinRel; +import org.apache.hadoop.hive.ql.optimizer.optiq.reloperators.HiveRel; +import org.eigenbase.rel.FilterRelBase; +import org.eigenbase.rel.JoinRelBase; +import org.eigenbase.rel.JoinRelType; +import org.eigenbase.rel.RelNode; +import org.eigenbase.relopt.RelOptRule; +import org.eigenbase.relopt.RelOptRuleCall; +import org.eigenbase.relopt.RelOptRuleOperand; +import org.eigenbase.relopt.RelOptUtil; +import org.eigenbase.relopt.RelOptUtil.InputFinder; +import org.eigenbase.rex.RexBuilder; +import org.eigenbase.rex.RexCall; +import org.eigenbase.rex.RexNode; +import org.eigenbase.rex.RexUtil; +import org.eigenbase.sql.SqlKind; +import org.eigenbase.util.Holder; + +import com.google.common.collect.ImmutableList; + +public abstract class HivePushFilterPastJoinRule extends RelOptRule { + + public static final 
HivePushFilterPastJoinRule FILTER_ON_JOIN = new HivePushFilterPastJoinRule( + operand(FilterRelBase.class, operand(HiveJoinRel.class, any())), + "HivePushFilterPastJoinRule:filter", true) { + @Override + public void onMatch(RelOptRuleCall call) { + HiveFilterRel filter = call.rel(0); + HiveJoinRel join = call.rel(1); + perform(call, filter, join); + } + }; + + public static final HivePushFilterPastJoinRule JOIN = new HivePushFilterPastJoinRule( + operand(HiveJoinRel.class, any()), "HivePushFilterPastJoinRule:no-filter", false) { + @Override + public void onMatch(RelOptRuleCall call) { + HiveJoinRel join = call.rel(0); + perform(call, null, join); + } + }; + + /** Whether to try to strengthen join-type. */ + private final boolean smart; + + // ~ Constructors ----------------------------------------------------------- + + /** + * Creates a PushFilterPastJoinRule with an explicit root operand. + */ + private HivePushFilterPastJoinRule(RelOptRuleOperand operand, String id, boolean smart) { + super(operand, "PushFilterRule: " + id); + this.smart = smart; + } + + // ~ Methods ---------------------------------------------------------------- + + protected void perform(RelOptRuleCall call, FilterRelBase filter, + JoinRelBase join) { + final List joinFilters = RelOptUtil.conjunctions(join + .getCondition()); + + /* + * todo: hb 6/26/14 for left SemiJoin we cannot push predicates yet. The + * assertion that num(JoinRel columns) = num(leftSrc) + num(rightSrc) + * doesn't hold. So RelOptUtil.classifyFilters fails. + */ + if (((HiveJoinRel) join).isLeftSemiJoin()) { + return; + } + + if (filter == null) { + // There is only the joinRel + // make sure it does not match a cartesian product joinRel + // (with "true" condition) otherwise this rule will be applied + // again on the new cartesian product joinRel. + boolean onlyTrueFilter = true; + for (RexNode joinFilter : joinFilters) { + if (!joinFilter.isAlwaysTrue()) { + onlyTrueFilter = false; + break; + } + } + + if (onlyTrueFilter) { + return; + } + } + + final List aboveFilters = filter != null ? RelOptUtil + .conjunctions(filter.getCondition()) : ImmutableList. of(); + + List leftFilters = new ArrayList(); + List rightFilters = new ArrayList(); + int origJoinFiltersSz = joinFilters.size(); + + // TODO - add logic to derive additional filters. E.g., from + // (t1.a = 1 AND t2.a = 2) OR (t1.b = 3 AND t2.b = 4), you can + // derive table filters: + // (t1.a = 1 OR t1.b = 3) + // (t2.a = 2 OR t2.b = 4) + + // Try to push down above filters. These are typically where clause + // filters. They can be pushed down if they are not on the NULL + // generating side. + boolean filterPushed = false; + final Holder joinTypeHolder = Holder.of(join.getJoinType()); + if (RelOptUtil.classifyFilters(join, aboveFilters, + join.getJoinType(), true, !join.getJoinType().generatesNullsOnLeft(), !join.getJoinType() + .generatesNullsOnRight(), joinFilters, leftFilters, rightFilters, joinTypeHolder, smart)) { + filterPushed = true; + } + + /* + * Any predicates pushed down to joinFilters that aren't equality + * conditions: put them back as aboveFilters because Hive doesn't support + * not equi join conditions. 
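+ * For example, in "t1 JOIN t2 ON t1.x = t2.x AND t1.y < t2.y", the + * non-equality condition t1.y < t2.y is put back as a filter above the + * join rather than left in the join condition.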
+ */ + ListIterator filterIter = joinFilters.listIterator(); + while (filterIter.hasNext()) { + RexNode exp = filterIter.next(); + if (exp instanceof RexCall) { + RexCall c = (RexCall) exp; + if (c.getOperator().getKind() == SqlKind.EQUALS) { + boolean validHiveJoinFilter = true; + for (RexNode rn : c.getOperands()) { + // NOTE: Hive dis-allows projections from both left & right side + // of join condition. Example: Hive disallows + // (r1.x=r2.x)=(r1.y=r2.y) on join condition. + if (filterRefersToBothSidesOfJoin(rn, join)) { + validHiveJoinFilter = false; + break; + } + } + if (validHiveJoinFilter) + continue; + } + } + aboveFilters.add(exp); + filterIter.remove(); + } + + /* + * if all pushed filters where put back then set filterPushed to false + */ + if (leftFilters.size() == 0 && rightFilters.size() == 0 + && joinFilters.size() == origJoinFiltersSz) { + filterPushed = false; + } + + // Try to push down filters in ON clause. A ON clause filter can only be + // pushed down if it does not affect the non-matching set, i.e. it is + // not on the side which is preserved. + if (RelOptUtil.classifyFilters(join, joinFilters, null, false, !join + .getJoinType().generatesNullsOnRight(), !join.getJoinType() + .generatesNullsOnLeft(), joinFilters, leftFilters, rightFilters, joinTypeHolder, false)) { + filterPushed = true; + } + + if (!filterPushed) { + return; + } + + /* + * Remove always true conditions that got pushed down. + */ + removeAlwaysTruePredicates(leftFilters); + removeAlwaysTruePredicates(rightFilters); + removeAlwaysTruePredicates(joinFilters); + + // create FilterRels on top of the children if any filters were + // pushed to them + RexBuilder rexBuilder = join.getCluster().getRexBuilder(); + RelNode leftRel = createFilterOnRel(rexBuilder, join.getLeft(), leftFilters); + RelNode rightRel = createFilterOnRel(rexBuilder, join.getRight(), + rightFilters); + + // create the new join node referencing the new children and + // containing its new join filters (if there are any) + RexNode joinFilter; + + if (joinFilters.size() == 0) { + // if nothing actually got pushed and there is nothing leftover, + // then this rule is a no-op + if (leftFilters.isEmpty() + && rightFilters.isEmpty() + && joinTypeHolder.get() == join.getJoinType()) { + return; + } + joinFilter = rexBuilder.makeLiteral(true); + } else { + joinFilter = RexUtil.composeConjunction(rexBuilder, joinFilters, true); + } + RelNode newJoinRel = HiveJoinRel.getJoin(join.getCluster(), leftRel, + rightRel, joinFilter, join.getJoinType(), false); + + // create a FilterRel on top of the join if needed + RelNode newRel = createFilterOnRel(rexBuilder, newJoinRel, aboveFilters); + + call.transformTo(newRel); + } + + /** + * If the filter list passed in is non-empty, creates a FilterRel on top of + * the existing RelNode; otherwise, just returns the RelNode + * + * @param rexBuilder + * rex builder + * @param rel + * the RelNode that the filter will be put on top of + * @param filters + * list of filters + * @return new RelNode or existing one if no filters + */ + private RelNode createFilterOnRel(RexBuilder rexBuilder, RelNode rel, + List filters) { + RexNode andFilters = RexUtil.composeConjunction(rexBuilder, filters, false); + if (andFilters.isAlwaysTrue()) { + return rel; + } + return new HiveFilterRel(rel.getCluster(), rel.getCluster().traitSetOf( + HiveRel.CONVENTION), rel, andFilters); + } + + private void removeAlwaysTruePredicates(List predicates) { + + ListIterator iter = predicates.listIterator(); + while (iter.hasNext()) { + 
RexNode exp = iter.next(); + if (isAlwaysTrue(exp)) { + iter.remove(); + } + } + } + + private boolean isAlwaysTrue(RexNode predicate) { + if (predicate instanceof RexCall) { + RexCall c = (RexCall) predicate; + if (c.getOperator().getKind() == SqlKind.EQUALS) { + return isAlwaysTrue(c.getOperands().get(0)) + && isAlwaysTrue(c.getOperands().get(1)); + } + } + return predicate.isAlwaysTrue(); + } + + private boolean filterRefersToBothSidesOfJoin(RexNode filter, JoinRelBase j) { + boolean refersToBothSides = false; + + int joinNoOfProjects = j.getRowType().getFieldCount(); + BitSet filterProjs = new BitSet(joinNoOfProjects); + BitSet allLeftProjs = new BitSet(joinNoOfProjects); + BitSet allRightProjs = new BitSet(joinNoOfProjects); + allLeftProjs.set(0, j.getInput(0).getRowType().getFieldCount(), true); + allRightProjs.set(j.getInput(0).getRowType().getFieldCount(), joinNoOfProjects, true); + + InputFinder inputFinder = new InputFinder(filterProjs); + filter.accept(inputFinder); + + if (allLeftProjs.intersects(filterProjs) && allRightProjs.intersects(filterProjs)) + refersToBothSides = true; + + return refersToBothSides; + } +} + +// End PushFilterPastJoinRule.java + diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/rules/HivePushJoinThroughJoinRule.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/rules/HivePushJoinThroughJoinRule.java new file mode 100644 index 0000000..0714bed --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/rules/HivePushJoinThroughJoinRule.java @@ -0,0 +1,37 @@ +package org.apache.hadoop.hive.ql.optimizer.optiq.rules; + +import org.apache.hadoop.hive.ql.optimizer.optiq.reloperators.HiveJoinRel; +import org.apache.hadoop.hive.ql.optimizer.optiq.reloperators.HiveJoinRel.JoinAlgorithm; +import org.apache.hadoop.hive.ql.optimizer.optiq.reloperators.HiveProjectRel; +import org.eigenbase.rel.JoinRelBase; +import org.eigenbase.rel.rules.PushJoinThroughJoinRule; +import org.eigenbase.relopt.RelOptRule; +import org.eigenbase.relopt.RelOptRuleCall; + +public class HivePushJoinThroughJoinRule extends PushJoinThroughJoinRule { + public static final RelOptRule RIGHT = new HivePushJoinThroughJoinRule( + "Hive PushJoinThroughJoinRule:right", true, + HiveJoinRel.class); + public static final RelOptRule LEFT = new HivePushJoinThroughJoinRule( + "Hive PushJoinThroughJoinRule:left", false, + HiveJoinRel.class); + + private HivePushJoinThroughJoinRule(String description, boolean right, + Class clazz) { + super(description, right, clazz, HiveProjectRel.DEFAULT_PROJECT_FACTORY); + } + + @Override + public boolean matches(RelOptRuleCall call) { + boolean isAMatch = false; + final HiveJoinRel topJoin = call.rel(0); + final HiveJoinRel bottomJoin = call.rel(1); + + if (!topJoin.isLeftSemiJoin() && topJoin.getJoinAlgorithm() == JoinAlgorithm.NONE + && bottomJoin.getJoinAlgorithm() == JoinAlgorithm.NONE) { + isAMatch = true; + } + + return isAMatch; + } +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/rules/HiveSwapJoinRule.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/rules/HiveSwapJoinRule.java new file mode 100644 index 0000000..10a9cb8 --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/rules/HiveSwapJoinRule.java @@ -0,0 +1,25 @@ +package org.apache.hadoop.hive.ql.optimizer.optiq.rules; + +import org.apache.hadoop.hive.ql.optimizer.optiq.reloperators.HiveJoinRel; +import org.apache.hadoop.hive.ql.optimizer.optiq.reloperators.HiveJoinRel.JoinAlgorithm; +import 
org.apache.hadoop.hive.ql.optimizer.optiq.reloperators.HiveProjectRel; + +import org.eigenbase.rel.rules.SwapJoinRule; +import org.eigenbase.relopt.RelOptRuleCall; + +public class HiveSwapJoinRule extends SwapJoinRule { + public static final HiveSwapJoinRule INSTANCE = new HiveSwapJoinRule(); + + private HiveSwapJoinRule() { + super(HiveJoinRel.class, HiveProjectRel.DEFAULT_PROJECT_FACTORY); + } + + @Override + public boolean matches(RelOptRuleCall call) { + if (call. rel(0).isLeftSemiJoin()) + return false; + else + return super.matches(call) + && call. rel(0).getJoinAlgorithm() == JoinAlgorithm.NONE; + } +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/rules/PartitionPruner.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/rules/PartitionPruner.java new file mode 100644 index 0000000..d9d94f6 --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/rules/PartitionPruner.java @@ -0,0 +1,197 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.ql.optimizer.optiq.rules; + +import java.util.ArrayList; +import java.util.HashSet; +import java.util.LinkedList; +import java.util.List; +import java.util.Set; + +import org.apache.hadoop.hive.metastore.api.FieldSchema; +import org.apache.hadoop.hive.ql.optimizer.optiq.RelOptHiveTable; +import org.eigenbase.relopt.RelOptCluster; +import org.eigenbase.reltype.RelDataType; +import org.eigenbase.reltype.RelDataTypeField; +import org.eigenbase.rex.RexCall; +import org.eigenbase.rex.RexInputRef; +import org.eigenbase.rex.RexLiteral; +import org.eigenbase.rex.RexNode; +import org.eigenbase.rex.RexVisitorImpl; +import org.eigenbase.sql.fun.SqlStdOperatorTable; +import org.eigenbase.util.Pair; + +public class PartitionPruner { + + /** + * Breaks the predicate into 2 pieces. The first piece is the expressions that + * only contain partition columns and can be used for Partition Pruning; the + * second piece is the predicates that are left. + * + * @param cluster + * @param hiveTable + * @param predicate + * @return a Pair of expressions, each of which maybe null. The 1st predicate + * is expressions that only contain partition columns; the 2nd + * predicate contains the remaining predicates. 
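+ * For example, if ds is a partition column and uid is not, then + * "ds = '2014-06-01' and uid > 10" splits into "ds = '2014-06-01'" + * (usable for pruning) and "uid > 10" (the remaining predicate).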
+ */ + public static Pair extractPartitionPredicates( + RelOptCluster cluster, RelOptHiveTable hiveTable, RexNode predicate) { + RexNode partitionPruningPred = predicate + .accept(new ExtractPartPruningPredicate(cluster, hiveTable)); + RexNode remainingPred = predicate.accept(new ExtractRemainingPredicate( + cluster, partitionPruningPred)); + return new Pair(partitionPruningPred, remainingPred); + } + + public static class ExtractPartPruningPredicate extends + RexVisitorImpl { + + final RelOptHiveTable hiveTable; + final RelDataType rType; + final Set partCols; + final RelOptCluster cluster; + + public ExtractPartPruningPredicate(RelOptCluster cluster, + RelOptHiveTable hiveTable) { + super(true); + this.hiveTable = hiveTable; + rType = hiveTable.getRowType(); + List pfs = hiveTable.getHiveTableMD().getPartCols(); + partCols = new HashSet(); + for (FieldSchema pf : pfs) { + partCols.add(pf.getName()); + } + this.cluster = cluster; + } + + @Override + public RexNode visitLiteral(RexLiteral literal) { + return literal; + } + + @Override + public RexNode visitInputRef(RexInputRef inputRef) { + RelDataTypeField f = rType.getFieldList().get(inputRef.getIndex()); + if (partCols.contains(f.getName())) { + return inputRef; + } else { + return null; + } + } + + @Override + public RexNode visitCall(RexCall call) { + if (!deep) { + return null; + } + + List args = new LinkedList(); + boolean argsPruned = false; + + for (RexNode operand : call.operands) { + RexNode n = operand.accept(this); + if (n != null) { + args.add(n); + } else { + argsPruned = true; + } + } + + if (call.getOperator() != SqlStdOperatorTable.AND) { + return argsPruned ? null : call; + } else { + if (args.size() == 0) { + return null; + } else if (args.size() == 1) { + return args.get(0); + } else { + return cluster.getRexBuilder().makeCall(call.getOperator(), args); + } + } + } + + } + + public static class ExtractRemainingPredicate extends RexVisitorImpl { + + List pruningPredicates; + final RelOptCluster cluster; + + public ExtractRemainingPredicate(RelOptCluster cluster, + RexNode partPruningExpr) { + super(true); + this.cluster = cluster; + pruningPredicates = new ArrayList(); + flattenPredicates(partPruningExpr); + } + + private void flattenPredicates(RexNode r) { + if (r instanceof RexCall + && ((RexCall) r).getOperator() == SqlStdOperatorTable.AND) { + for (RexNode c : ((RexCall) r).getOperands()) { + flattenPredicates(c); + } + } else { + pruningPredicates.add(r); + } + } + + @Override + public RexNode visitLiteral(RexLiteral literal) { + return literal; + } + + @Override + public RexNode visitInputRef(RexInputRef inputRef) { + return inputRef; + } + + @Override + public RexNode visitCall(RexCall call) { + if (!deep) { + return null; + } + + if (call.getOperator() != SqlStdOperatorTable.AND) { + if (pruningPredicates.contains(call)) { + return null; + } else { + return call; + } + } + + List args = new LinkedList(); + + for (RexNode operand : call.operands) { + RexNode n = operand.accept(this); + if (n != null) { + args.add(n); + } + } + + if (args.size() == 0) { + return null; + } else if (args.size() == 1) { + return args.get(0); + } else { + return cluster.getRexBuilder().makeCall(call.getOperator(), args); + } + } + } +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/stats/CBOTableStatsValidator.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/stats/CBOTableStatsValidator.java new file mode 100644 index 0000000..370feaa --- /dev/null +++ 
b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/stats/CBOTableStatsValidator.java @@ -0,0 +1,90 @@ +package org.apache.hadoop.hive.ql.optimizer.optiq.stats; + +import java.util.ArrayList; +import java.util.HashSet; +import java.util.Map; +import java.util.Stack; + +import org.apache.hadoop.hive.ql.exec.Operator; +import org.apache.hadoop.hive.ql.exec.TableScanOperator; +import org.apache.hadoop.hive.ql.lib.DefaultRuleDispatcher; +import org.apache.hadoop.hive.ql.lib.Dispatcher; +import org.apache.hadoop.hive.ql.lib.ForwardWalker; +import org.apache.hadoop.hive.ql.lib.GraphWalker; +import org.apache.hadoop.hive.ql.lib.Node; +import org.apache.hadoop.hive.ql.lib.NodeProcessor; +import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx; +import org.apache.hadoop.hive.ql.lib.Rule; +import org.apache.hadoop.hive.ql.lib.RuleRegExp; +import org.apache.hadoop.hive.ql.parse.ParseContext; +import org.apache.hadoop.hive.ql.parse.SemanticException; +import org.apache.hadoop.hive.ql.plan.OperatorDesc; +import org.apache.hadoop.hive.ql.plan.Statistics; + +import com.google.common.collect.ImmutableMap; + +public class CBOTableStatsValidator { + private final CBOValidateStatsContext m_ctx = new CBOValidateStatsContext(); + + public boolean validStats(Operator sinkOp, ParseContext pCtx) { + Map rules = ImmutableMap + . builder() + .put(new RuleRegExp("R1", TableScanOperator.getOperatorName() + "%"), + new TableScanProcessor()).build(); + + Dispatcher disp = new DefaultRuleDispatcher(getDefaultProc(), rules, m_ctx); + GraphWalker fWalker = new ForwardWalker(disp); + + ArrayList topNodes = new ArrayList(); + topNodes.addAll(pCtx.getTopOps().values()); + + try { + fWalker.startWalking(topNodes, null); + } catch (SemanticException e) { + throw new RuntimeException(e); + } + + return (m_ctx.m_tabsWithIncompleteStats.isEmpty()); + } + + public String getIncompleteStatsTabNames() { + StringBuilder sb = new StringBuilder(); + for (String tabName : m_ctx.m_tabsWithIncompleteStats) { + if (sb.length() > 1) + sb.append(", "); + sb.append(tabName); + } + return sb.toString(); + } + + private static NodeProcessor getDefaultProc() { + return new NodeProcessor() { + @Override + public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, + Object... nodeOutputs) { + return null; + // TODO: Shouldn't we throw exception? as this would imply we got an op + // tree with no TS + } + }; + } + + static class TableScanProcessor implements NodeProcessor { + public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, + Object... nodeOutputs) { + TableScanOperator tableScanOp = (TableScanOperator) nd; + Statistics stats = tableScanOp.getStatistics(); + int noColsWithStats = (stats != null && stats.getColumnStats() != null) ? 
stats + .getColumnStats().size() : 0; + if (noColsWithStats != tableScanOp.getNeededColumns().size()) { + ((CBOValidateStatsContext) procCtx).m_tabsWithIncompleteStats.add(tableScanOp.getConf() + .getAlias()); + } + return null; + } + } + + static class CBOValidateStatsContext implements NodeProcessorCtx { + final private HashSet m_tabsWithIncompleteStats = new HashSet(); + } +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/stats/FilterSelectivityEstimator.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/stats/FilterSelectivityEstimator.java new file mode 100644 index 0000000..f8e3238 --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/stats/FilterSelectivityEstimator.java @@ -0,0 +1,214 @@ +package org.apache.hadoop.hive.ql.optimizer.optiq.stats; + +import java.util.BitSet; + +import org.apache.hadoop.hive.ql.optimizer.optiq.RelOptHiveTable; +import org.apache.hadoop.hive.ql.optimizer.optiq.reloperators.HiveTableScanRel; +import org.eigenbase.rel.FilterRelBase; +import org.eigenbase.rel.ProjectRel; +import org.eigenbase.rel.ProjectRelBase; +import org.eigenbase.rel.RelNode; +import org.eigenbase.rel.metadata.RelMetadataQuery; +import org.eigenbase.relopt.RelOptUtil; +import org.eigenbase.relopt.RelOptUtil.InputReferencedVisitor; +import org.eigenbase.rex.RexCall; +import org.eigenbase.rex.RexInputRef; +import org.eigenbase.rex.RexNode; +import org.eigenbase.rex.RexUtil; +import org.eigenbase.rex.RexVisitorImpl; +import org.eigenbase.sql.SqlKind; + +public class FilterSelectivityEstimator extends RexVisitorImpl { + private final RelNode m_childRel; + private final double m_childCardinality; + + protected FilterSelectivityEstimator(RelNode childRel) { + super(true); + m_childRel = childRel; + m_childCardinality = RelMetadataQuery.getRowCount(m_childRel); + } + + public Double estimateSelectivity(RexNode predicate) { + return predicate.accept(this); + } + + public Double visitCall(RexCall call) { + if (!deep) { + return 1.0; + } + + /* + * Ignore any predicates on partition columns + * because we have already accounted for these in + * the Table row count. + */ + if (isPartitionPredicate(call, m_childRel)) { + return 1.0; + } + + Double selectivity = null; + SqlKind op = call.getKind(); + + switch (op) { + case AND: { + selectivity = computeConjunctionSelectivity(call); + break; + } + + case OR: { + selectivity = computeDisjunctionSelectivity(call); + break; + } + + case NOT_EQUALS: { + selectivity = computeNotEqualitySelectivity(call); + } + + case LESS_THAN_OR_EQUAL: + case GREATER_THAN_OR_EQUAL: + case LESS_THAN: + case GREATER_THAN: { + selectivity = ((double) 1 / (double) 3); + break; + } + + case IN: { + selectivity = ((double) 1 / ((double) call.operands.size())); + break; + } + + default: + selectivity = computeFunctionSelectivity(call); + } + + return selectivity; + } + + /** + * NDV of "f1(x, y, z) != f2(p, q, r)" -> + * "(maxNDV(x,y,z,p,q,r) - 1)/maxNDV(x,y,z,p,q,r)". + *

+ * + * @param call + * @return + */ + private Double computeNotEqualitySelectivity(RexCall call) { + double tmpNDV = getMaxNDV(call); + + if (tmpNDV > 1) + return (tmpNDV - (double) 1) / tmpNDV; + else + return 1.0; + } + + /** + * Selectivity of f(X,y,z) -> 1/maxNDV(x,y,z). + *

+ * Note that >, >=, <, <=, = ... are considered generic functions and use + * this method to find their selectivity. + * + * @param call + * @return + */ + private Double computeFunctionSelectivity(RexCall call) { + return 1 / getMaxNDV(call); + } + + /** + * Disjunction Selectivity -> (1 - (1-m1/n)(1-m2/n)) where n is the total + * number of tuples from the child and m1 and m2 are the expected numbers of + * tuples from each part of the disjunction predicate. + *

+ * Note we compute m1. m2.. by applying selectivity of the disjunctive element + * on the cardinality from child. + * + * @param call + * @return + */ + private Double computeDisjunctionSelectivity(RexCall call) { + Double tmpCardinality; + Double tmpSelectivity; + double selectivity = 1; + + for (RexNode dje : call.getOperands()) { + tmpSelectivity = dje.accept(this); + if (tmpSelectivity == null) { + tmpSelectivity = 0.99; + } + tmpCardinality = m_childCardinality * tmpSelectivity; + + if (tmpCardinality > 1) + tmpSelectivity = (1 - tmpCardinality / m_childCardinality); + else + tmpSelectivity = 1.0; + + selectivity *= tmpSelectivity; + } + + if (selectivity > 1) + return (1 - selectivity); + else + return 1.0; + } + + /** + * Selectivity of conjunctive predicate -> (selectivity of conjunctive + * element1) * (selectivity of conjunctive element2)... + * + * @param call + * @return + */ + private Double computeConjunctionSelectivity(RexCall call) { + Double tmpSelectivity; + double selectivity = 1; + + for (RexNode cje : call.getOperands()) { + tmpSelectivity = cje.accept(this); + if (tmpSelectivity != null) { + selectivity *= tmpSelectivity; + } + } + + return selectivity; + } + + private Double getMaxNDV(RexCall call) { + double tmpNDV; + double maxNDV = 1.0; + InputReferencedVisitor irv; + + for (RexNode op : call.getOperands()) { + if (op instanceof RexInputRef) { + tmpNDV = HiveRelMdDistinctRowCount.getDistinctRowCount(m_childRel, + ((RexInputRef) op).getIndex()); + if (tmpNDV > maxNDV) + maxNDV = tmpNDV; + } else { + irv = new InputReferencedVisitor(); + irv.apply(op); + for (Integer childProjIndx : irv.inputPosReferenced) { + tmpNDV = HiveRelMdDistinctRowCount.getDistinctRowCount(m_childRel, childProjIndx); + if (tmpNDV > maxNDV) + maxNDV = tmpNDV; + } + } + } + + return maxNDV; + } + + private boolean isPartitionPredicate(RexNode expr, RelNode r) { + if ( r instanceof ProjectRelBase ) { + expr = RelOptUtil.pushFilterPastProject(expr, (ProjectRelBase) r); + return isPartitionPredicate(expr, ((ProjectRelBase) r).getChild()); + } else if ( r instanceof FilterRelBase ) { + isPartitionPredicate(expr, ((ProjectRelBase) r).getChild()); + } else if ( r instanceof HiveTableScanRel ) { + RelOptHiveTable table = (RelOptHiveTable) + ((HiveTableScanRel)r).getTable(); + BitSet cols = RelOptUtil.InputFinder.bits(expr); + return table.containsPartitionColumnsOnly(cols); + } + return false; + } +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/stats/HiveRelMdDistinctRowCount.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/stats/HiveRelMdDistinctRowCount.java new file mode 100644 index 0000000..f3e91bb --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/stats/HiveRelMdDistinctRowCount.java @@ -0,0 +1,110 @@ +package org.apache.hadoop.hive.ql.optimizer.optiq.stats; + +import java.util.BitSet; +import java.util.List; + +import net.hydromatic.optiq.BuiltinMethod; + +import org.apache.hadoop.hive.ql.optimizer.optiq.HiveOptiqUtil; +import org.apache.hadoop.hive.ql.optimizer.optiq.cost.HiveCost; +import org.apache.hadoop.hive.ql.optimizer.optiq.reloperators.HiveJoinRel; +import org.apache.hadoop.hive.ql.optimizer.optiq.reloperators.HiveTableScanRel; +import org.apache.hadoop.hive.ql.plan.ColStatistics; +import org.eigenbase.rel.JoinRelBase; +import org.eigenbase.rel.RelNode; +import org.eigenbase.rel.metadata.ChainedRelMetadataProvider; +import org.eigenbase.rel.metadata.ReflectiveRelMetadataProvider; +import 
org.eigenbase.rel.metadata.RelMdDistinctRowCount; +import org.eigenbase.rel.metadata.RelMdUtil; +import org.eigenbase.rel.metadata.RelMetadataProvider; +import org.eigenbase.rel.metadata.RelMetadataQuery; +import org.eigenbase.relopt.RelOptCost; +import org.eigenbase.rex.RexNode; + +import com.google.common.collect.ImmutableList; + +public class HiveRelMdDistinctRowCount extends RelMdDistinctRowCount { + + private static final HiveRelMdDistinctRowCount INSTANCE = + new HiveRelMdDistinctRowCount(); + + public static final RelMetadataProvider SOURCE = ChainedRelMetadataProvider + .of(ImmutableList.of( + + ReflectiveRelMetadataProvider.reflectiveSource( + BuiltinMethod.DISTINCT_ROW_COUNT.method, INSTANCE), + + ReflectiveRelMetadataProvider.reflectiveSource( + BuiltinMethod.CUMULATIVE_COST.method, INSTANCE))); + + private HiveRelMdDistinctRowCount() { + } + + // Catch-all rule when none of the others apply. + @Override + public Double getDistinctRowCount(RelNode rel, BitSet groupKey, + RexNode predicate) { + if (rel instanceof HiveTableScanRel) { + return getDistinctRowCount((HiveTableScanRel) rel, groupKey, predicate); + } + /* + * For now use Optiq' default formulas for propagating NDVs up the Query + * Tree. + */ + return super.getDistinctRowCount(rel, groupKey, predicate); + } + + private Double getDistinctRowCount(HiveTableScanRel htRel, BitSet groupKey, + RexNode predicate) { + List projIndxLst = HiveOptiqUtil + .translateBitSetToProjIndx(groupKey); + List colStats = htRel.getColStat(projIndxLst); + Double noDistinctRows = 1.0; + for (ColStatistics cStat : colStats) { + noDistinctRows *= cStat.getCountDistint(); + } + + return Math.min(noDistinctRows, htRel.getRows()); + } + + public static Double getDistinctRowCount(RelNode r, int indx) { + BitSet bitSetOfRqdProj = new BitSet(); + bitSetOfRqdProj.set(indx); + return RelMetadataQuery.getDistinctRowCount(r, bitSetOfRqdProj, r + .getCluster().getRexBuilder().makeLiteral(true)); + } + + @Override + public Double getDistinctRowCount(JoinRelBase rel, BitSet groupKey, + RexNode predicate) { + if (rel instanceof HiveJoinRel) { + HiveJoinRel hjRel = (HiveJoinRel) rel; + //TODO: Improve this + if (hjRel.isLeftSemiJoin()) { + return RelMetadataQuery.getDistinctRowCount(hjRel.getLeft(), groupKey, + rel.getCluster().getRexBuilder().makeLiteral(true)); + } else { + return RelMdUtil.getJoinDistinctRowCount(rel, rel.getJoinType(), + groupKey, predicate, true); + } + } + + return RelMetadataQuery.getDistinctRowCount(rel, groupKey, predicate); + } + + /* + * Favor Broad Plans over Deep Plans. 
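+ * The cumulative cost of a join is taken to be its own cost plus the + * maximum (rather than the sum) of its inputs' cumulative costs, so bushy + * join trees compare favorably against deep left chains.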
+ */ + public RelOptCost getCumulativeCost(HiveJoinRel rel) { + RelOptCost cost = RelMetadataQuery.getNonCumulativeCost(rel); + List inputs = rel.getInputs(); + RelOptCost maxICost = HiveCost.ZERO; + for (RelNode input : inputs) { + RelOptCost iCost = RelMetadataQuery.getCumulativeCost(input); + if (maxICost.isLt(iCost)) { + maxICost = iCost; + } + } + return cost.plus(maxICost); + } +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/stats/HiveRelMdSelectivity.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/stats/HiveRelMdSelectivity.java new file mode 100644 index 0000000..df70de2 --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/stats/HiveRelMdSelectivity.java @@ -0,0 +1,184 @@ +package org.apache.hadoop.hive.ql.optimizer.optiq.stats; + +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import net.hydromatic.optiq.BuiltinMethod; + +import org.apache.hadoop.hive.ql.optimizer.optiq.JoinUtil.JoinLeafPredicateInfo; +import org.apache.hadoop.hive.ql.optimizer.optiq.JoinUtil.JoinPredicateInfo; +import org.apache.hadoop.hive.ql.optimizer.optiq.reloperators.HiveJoinRel; +import org.apache.hadoop.hive.ql.optimizer.optiq.reloperators.HiveTableScanRel; +import org.eigenbase.rel.JoinRelType; +import org.eigenbase.rel.metadata.ReflectiveRelMetadataProvider; +import org.eigenbase.rel.metadata.RelMdSelectivity; +import org.eigenbase.rel.metadata.RelMdUtil; +import org.eigenbase.rel.metadata.RelMetadataProvider; +import org.eigenbase.rel.metadata.RelMetadataQuery; +import org.eigenbase.rex.RexNode; +import org.eigenbase.rex.RexUtil; + +import com.google.common.collect.ImmutableMap; + +public class HiveRelMdSelectivity extends RelMdSelectivity { + public static final RelMetadataProvider SOURCE = ReflectiveRelMetadataProvider.reflectiveSource( + BuiltinMethod.SELECTIVITY.method, + new HiveRelMdSelectivity()); + + protected HiveRelMdSelectivity() { + super(); + } + + public Double getSelectivity(HiveTableScanRel t, RexNode predicate) { + if (predicate != null) { + FilterSelectivityEstimator filterSelEstmator = new FilterSelectivityEstimator(t); + return filterSelEstmator.estimateSelectivity(predicate); + } + + return 1.0; + } + + public Double getSelectivity(HiveJoinRel j, RexNode predicate) { + if (j.getJoinType().equals(JoinRelType.INNER)) { + return computeInnerJoinSelectivity(j, predicate); + } + return 1.0; + } + + private Double computeInnerJoinSelectivity(HiveJoinRel j, RexNode predicate) { + double ndvCrossProduct = 1; + RexNode combinedPredicate = getCombinedPredicateForJoin(j, predicate); + JoinPredicateInfo jpi = JoinPredicateInfo.constructJoinPredicateInfo(j, + combinedPredicate); + ImmutableMap.Builder colStatMapBuilder = ImmutableMap + .builder(); + ImmutableMap colStatMap; + int rightOffSet = j.getLeft().getRowType().getFieldCount(); + + // 1. Update Col Stats Map with col stats for columns from left side of + // Join which are part of join keys + for (Integer ljk : jpi.getProjsFromLeftPartOfJoinKeysInChildSchema()) { + colStatMapBuilder.put(ljk, + HiveRelMdDistinctRowCount.getDistinctRowCount(j.getLeft(), ljk)); + } + + // 2. Update Col Stats Map with col stats for columns from right side of + // Join which are part of join keys + for (Integer rjk : jpi.getProjsFromRightPartOfJoinKeysInChildSchema()) { + colStatMapBuilder.put(rjk + rightOffSet, + HiveRelMdDistinctRowCount.getDistinctRowCount(j.getRight(), rjk)); + } + colStatMap = colStatMapBuilder.build(); + + // 3. 
Walk through the join condition, building the NDV for selectivity. + // The NDV of the join cannot exceed the cardinality of the cross join. + List<JoinLeafPredicateInfo> peLst = jpi.getEquiJoinPredicateElements(); + int noOfPE = peLst.size(); + if (noOfPE > 0) { + // 3.1 Use the first conjunctive predicate element's NDV as the seed + ndvCrossProduct = getMaxNDVForJoinSelectivity(peLst.get(0), colStatMap); + + // 3.2 If there is more than one conjunctive predicate element, walk + // through them one by one and compute the cross product of their NDVs. + // The cross product is computed by multiplying the largest NDV among the + // conjunctive predicate elements with the degraded NDVs of the remaining + // elements; an NDV is degraded using the log function. Finally, the + // ndvCrossProduct is fenced at the join cross product so that the NDV + // cannot exceed the worst-case join cardinality.
+ // NDV of a conjunctive predicate element is the max NDV of all arguments + // to lhs, rhs expressions. + // NDV(JoinCondition) = min (left cardinality * right cardinality, + // ndvCrossProduct(JoinCondition)) + // ndvCrossProduct(JoinCondition) = ndv(pex)*log(ndv(pe1))*log(ndv(pe2)) + // where pex is the predicate element of join condition with max ndv. + // ndv(pe) = max(NDV(left.Expr), NDV(right.Expr)) + // NDV(expr) = max(NDV( expr args)) + if (noOfPE > 1) { + double maxNDVSoFar = ndvCrossProduct; + double ndvToBeSmoothed; + double tmpNDV; + + for (int i = 1; i < noOfPE; i++) { + tmpNDV = getMaxNDVForJoinSelectivity(peLst.get(i), colStatMap); + if (tmpNDV > maxNDVSoFar) { + ndvToBeSmoothed = maxNDVSoFar; + maxNDVSoFar = tmpNDV; + ndvCrossProduct = (ndvCrossProduct / ndvToBeSmoothed) * tmpNDV; + } else { + ndvToBeSmoothed = tmpNDV; + } + // TODO: revisit the fence + if (ndvToBeSmoothed > 3) + ndvCrossProduct *= Math.log(ndvToBeSmoothed); + else + ndvCrossProduct *= ndvToBeSmoothed; + } + + if (j.isLeftSemiJoin()) + ndvCrossProduct = Math.min(RelMetadataQuery.getRowCount(j.getLeft()), + ndvCrossProduct); + else + ndvCrossProduct = Math.min(RelMetadataQuery.getRowCount(j.getLeft()) + * RelMetadataQuery.getRowCount(j.getRight()), ndvCrossProduct); + } + } + + // 4. Join Selectivity = 1/NDV + return (1 / ndvCrossProduct); + } + + private RexNode getCombinedPredicateForJoin(HiveJoinRel j, RexNode additionalPredicate) { + RexNode minusPred = RelMdUtil.minusPreds(j.getCluster().getRexBuilder(), additionalPredicate, + j.getCondition()); + + if (minusPred != null) { + List minusList = new ArrayList(); + minusList.add(j.getCondition()); + minusList.add(minusPred); + + return RexUtil.composeConjunction(j.getCluster().getRexBuilder(), minusList, true); + } + + return j.getCondition(); + } + + /** + * Compute Max NDV to determine Join Selectivity. 
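+ * For an equi-join predicate element such as t1.x = t2.y this is + * max(NDV(t1.x), NDV(t2.y)); the element then contributes 1/maxNDV to the + * join selectivity.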
+ * + * @param jlpi + * @param colStatMap + * Immutable Map of Projection Index (in Join Schema) to Column Stat + * @param rightProjOffSet + * @return + */ + private static Double getMaxNDVForJoinSelectivity(JoinLeafPredicateInfo jlpi, + ImmutableMap colStatMap) { + Double maxNDVSoFar = 1.0; + + maxNDVSoFar = getMaxNDVFromProjections(colStatMap, + jlpi.getProjsFromLeftPartOfJoinKeysInJoinSchema(), maxNDVSoFar); + maxNDVSoFar = getMaxNDVFromProjections(colStatMap, + jlpi.getProjsFromRightPartOfJoinKeysInJoinSchema(), maxNDVSoFar); + + return maxNDVSoFar; + } + + private static Double getMaxNDVFromProjections(Map colStatMap, + Set projectionSet, Double defaultMaxNDV) { + Double colNDV = null; + Double maxNDVSoFar = defaultMaxNDV; + + for (Integer projIndx : projectionSet) { + colNDV = colStatMap.get(projIndx); + if (colNDV > maxNDVSoFar) + maxNDVSoFar = colNDV; + } + + return maxNDVSoFar; + } +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/ASTBuilder.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/ASTBuilder.java new file mode 100644 index 0000000..f9c0aeb --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/ASTBuilder.java @@ -0,0 +1,200 @@ +package org.apache.hadoop.hive.ql.optimizer.optiq.translator; + +import java.sql.Date; +import java.text.DateFormat; +import java.text.SimpleDateFormat; +import java.util.Calendar; + +import org.apache.hadoop.hive.ql.optimizer.optiq.RelOptHiveTable; +import org.apache.hadoop.hive.ql.parse.ASTNode; +import org.apache.hadoop.hive.ql.parse.HiveParser; +import org.apache.hadoop.hive.ql.parse.ParseDriver; +import org.eigenbase.rel.JoinRelType; +import org.eigenbase.rel.TableAccessRelBase; +import org.eigenbase.rex.RexLiteral; +import org.eigenbase.sql.type.SqlTypeName; + +class ASTBuilder { + + static ASTBuilder construct(int tokenType, String text) { + ASTBuilder b = new ASTBuilder(); + b.curr = createAST(tokenType, text); + return b; + } + + static ASTNode createAST(int tokenType, String text) { + return (ASTNode) ParseDriver.adaptor.create(tokenType, text); + } + + static ASTNode destNode() { + return ASTBuilder + .construct(HiveParser.TOK_DESTINATION, "TOK_DESTINATION") + .add( + ASTBuilder.construct(HiveParser.TOK_DIR, "TOK_DIR").add(HiveParser.TOK_TMP_FILE, + "TOK_TMP_FILE")).node(); + } + + static ASTNode table(TableAccessRelBase scan) { + RelOptHiveTable hTbl = (RelOptHiveTable) scan.getTable(); + ASTBuilder b = ASTBuilder + .construct(HiveParser.TOK_TABREF, "TOK_TABREF") + .add( + ASTBuilder.construct(HiveParser.TOK_TABNAME, "TOK_TABNAME") + .add(HiveParser.Identifier, hTbl.getHiveTableMD().getDbName()) + .add(HiveParser.Identifier, hTbl.getHiveTableMD().getTableName())) + .add(HiveParser.Identifier, hTbl.getName()); + return b.node(); + } + + static ASTNode join(ASTNode left, ASTNode right, JoinRelType joinType, ASTNode cond, + boolean semiJoin) { + ASTBuilder b = null; + + switch (joinType) { + case INNER: + if (semiJoin) { + b = ASTBuilder.construct(HiveParser.TOK_LEFTSEMIJOIN, "TOK_LEFTSEMIJOIN"); + } else { + b = ASTBuilder.construct(HiveParser.TOK_JOIN, "TOK_JOIN"); + } + break; + case LEFT: + b = ASTBuilder.construct(HiveParser.TOK_LEFTOUTERJOIN, "TOK_LEFTOUTERJOIN"); + break; + case RIGHT: + b = ASTBuilder.construct(HiveParser.TOK_RIGHTOUTERJOIN, "TOK_RIGHTOUTERJOIN"); + break; + case FULL: + b = ASTBuilder.construct(HiveParser.TOK_FULLOUTERJOIN, "TOK_FULLOUTERJOIN"); + break; + } + + b.add(left).add(right).add(cond); + return b.node(); + } + + static 
ASTNode subQuery(ASTNode qry, String alias) { + return ASTBuilder.construct(HiveParser.TOK_SUBQUERY, "TOK_SUBQUERY").add(qry) + .add(HiveParser.Identifier, alias).node(); + } + + static ASTNode qualifiedName(String tableName, String colName) { + ASTBuilder b = ASTBuilder + .construct(HiveParser.DOT, ".") + .add( + ASTBuilder.construct(HiveParser.TOK_TABLE_OR_COL, "TOK_TABLE_OR_COL").add( + HiveParser.Identifier, tableName)).add(HiveParser.Identifier, colName); + return b.node(); + } + + static ASTNode unqualifiedName(String colName) { + ASTBuilder b = ASTBuilder.construct(HiveParser.TOK_TABLE_OR_COL, "TOK_TABLE_OR_COL").add( + HiveParser.Identifier, colName); + return b.node(); + } + + static ASTNode where(ASTNode cond) { + return ASTBuilder.construct(HiveParser.TOK_WHERE, "TOK_WHERE").add(cond).node(); + } + + static ASTNode having(ASTNode cond) { + return ASTBuilder.construct(HiveParser.TOK_HAVING, "TOK_HAVING").add(cond).node(); + } + + static ASTNode limit(Object value) { + return ASTBuilder.construct(HiveParser.TOK_LIMIT, "TOK_LIMIT") + .add(HiveParser.Number, value.toString()).node(); + } + + static ASTNode selectExpr(ASTNode expr, String alias) { + return ASTBuilder.construct(HiveParser.TOK_SELEXPR, "TOK_SELEXPR").add(expr) + .add(HiveParser.Identifier, alias).node(); + } + + static ASTNode literal(RexLiteral literal) { + Object val = null; + int type = 0; + SqlTypeName sqlType = literal.getType().getSqlTypeName(); + + switch (sqlType) { + case TINYINT: + val = literal.getValue3(); + type = HiveParser.TinyintLiteral; + break; + case SMALLINT: + val = literal.getValue3(); + type = HiveParser.SmallintLiteral; + break; + case INTEGER: + case BIGINT: + val = literal.getValue3(); + type = HiveParser.BigintLiteral; + break; + case DECIMAL: + case FLOAT: + case DOUBLE: + case REAL: + val = literal.getValue3(); + type = HiveParser.Number; + break; + case VARCHAR: + case CHAR: + val = literal.getValue3(); + type = HiveParser.StringLiteral; + val = "'" + String.valueOf(val) + "'"; + break; + case BOOLEAN: + val = literal.getValue3(); + type = ((Boolean) val).booleanValue() ? 
HiveParser.KW_TRUE : HiveParser.KW_FALSE; + break; + case DATE: { + val = literal.getValue(); + type = HiveParser.TOK_DATELITERAL; + DateFormat df = new SimpleDateFormat("yyyy-MM-dd"); + val = df.format(((Calendar) val).getTime()); + val = "'" + val + "'"; + } + break; + case TIME: + case TIMESTAMP: { + val = literal.getValue(); + type = HiveParser.TOK_TIMESTAMP; + DateFormat df = new SimpleDateFormat("yyyy-MM-dd hh:mm:ss"); + val = df.format(((Calendar) val).getTime()); + val = "'" + val + "'"; + } + break; + case NULL: + type = HiveParser.TOK_NULL; + break; + + default: + throw new RuntimeException("Unsupported Type: " + sqlType); + } + + return (ASTNode) ParseDriver.adaptor.create(type, String.valueOf(val)); + } + + ASTNode curr; + + ASTNode node() { + return curr; + } + + ASTBuilder add(int tokenType, String text) { + ParseDriver.adaptor.addChild(curr, createAST(tokenType, text)); + return this; + } + + ASTBuilder add(ASTBuilder b) { + ParseDriver.adaptor.addChild(curr, b.curr); + return this; + } + + ASTBuilder add(ASTNode n) { + if (n != null) { + ParseDriver.adaptor.addChild(curr, n); + } + return this; + } +} \ No newline at end of file diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/ASTConverter.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/ASTConverter.java new file mode 100644 index 0000000..cfa032c --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/ASTConverter.java @@ -0,0 +1,584 @@ +package org.apache.hadoop.hive.ql.optimizer.optiq.translator; + +import java.util.ArrayList; +import java.util.LinkedList; +import java.util.List; +import java.util.concurrent.atomic.AtomicLong; + +import net.hydromatic.optiq.util.BitSets; + +import org.apache.hadoop.hive.metastore.api.FieldSchema; +import org.apache.hadoop.hive.ql.optimizer.optiq.reloperators.HiveSortRel; +import org.apache.hadoop.hive.ql.optimizer.optiq.translator.SqlFunctionConverter.HiveToken; +import org.apache.hadoop.hive.ql.parse.ASTNode; +import org.apache.hadoop.hive.ql.parse.HiveParser; +import org.apache.hadoop.hive.ql.parse.ParseDriver; +import org.eigenbase.rel.AggregateCall; +import org.eigenbase.rel.AggregateRelBase; +import org.eigenbase.rel.FilterRelBase; +import org.eigenbase.rel.JoinRelBase; +import org.eigenbase.rel.ProjectRelBase; +import org.eigenbase.rel.RelFieldCollation; +import org.eigenbase.rel.RelNode; +import org.eigenbase.rel.RelVisitor; +import org.eigenbase.rel.SortRel; +import org.eigenbase.rel.TableAccessRelBase; +import org.eigenbase.rel.UnionRelBase; +import org.eigenbase.rel.rules.SemiJoinRel; +import org.eigenbase.reltype.RelDataTypeField; +import org.eigenbase.rex.RexCall; +import org.eigenbase.rex.RexFieldCollation; +import org.eigenbase.rex.RexInputRef; +import org.eigenbase.rex.RexLiteral; +import org.eigenbase.rex.RexNode; +import org.eigenbase.rex.RexOver; +import org.eigenbase.rex.RexVisitorImpl; +import org.eigenbase.rex.RexWindow; +import org.eigenbase.rex.RexWindowBound; +import org.eigenbase.sql.SqlKind; +import org.eigenbase.sql.SqlOperator; +import org.eigenbase.sql.type.BasicSqlType; +import org.eigenbase.sql.type.SqlTypeName; + +import com.google.common.collect.Iterables; + +public class ASTConverter { + + RelNode root; + HiveAST hiveAST; + RelNode from; + FilterRelBase where; + AggregateRelBase groupBy; + FilterRelBase having; + ProjectRelBase select; + SortRel order; + + Schema schema; + + ASTConverter(RelNode root) { + this.root = root; + hiveAST = new HiveAST(); + } + + public static 
ASTNode convert(final RelNode relNode, List resultSchema) { + SortRel sortrel = null; + RelNode root = DerivedTableInjector.convertOpTree(relNode, resultSchema); + + if (root instanceof SortRel) { + sortrel = (SortRel) root; + root = sortrel.getChild(); + if (!(root instanceof ProjectRelBase)) + throw new RuntimeException("Child of root sort node is not a project"); + } + + ASTConverter c = new ASTConverter(root); + return c.convert(sortrel); + } + + public ASTNode convert(SortRel sortrel) { + /* + * 1. Walk RelNode Graph; note from, where, gBy.. nodes. + */ + new QBVisitor().go(root); + + /* + * 2. convert from node. + */ + QueryBlockInfo qb = convertSource(from); + schema = qb.schema; + hiveAST.from = ASTBuilder.construct(HiveParser.TOK_FROM, "TOK_FROM").add(qb.ast).node(); + + /* + * 3. convert filterNode + */ + if (where != null) { + ASTNode cond = where.getCondition().accept(new RexVisitor(schema)); + hiveAST.where = ASTBuilder.where(cond); + } + + /* + * 4. GBy + */ + if (groupBy != null) { + ASTBuilder b = ASTBuilder.construct(HiveParser.TOK_GROUPBY, "TOK_GROUPBY"); + for (int i : BitSets.toIter(groupBy.getGroupSet())) { + RexInputRef iRef = new RexInputRef(i, new BasicSqlType(SqlTypeName.ANY)); + b.add(iRef.accept(new RexVisitor(schema))); + } + + if (!groupBy.getGroupSet().isEmpty()) + hiveAST.groupBy = b.node(); + schema = new Schema(schema, groupBy); + } + + /* + * 5. Having + */ + if (having != null) { + ASTNode cond = having.getCondition().accept(new RexVisitor(schema)); + hiveAST.having = ASTBuilder.having(cond); + } + + /* + * 6. Project + */ + ASTBuilder b = ASTBuilder.construct(HiveParser.TOK_SELECT, "TOK_SELECT"); + int i = 0; + + for (RexNode r : select.getChildExps()) { + ASTNode selectExpr = ASTBuilder.selectExpr(r.accept(new RexVisitor(schema)), select + .getRowType().getFieldNames().get(i++)); + b.add(selectExpr); + } + hiveAST.select = b.node(); + + /* + * 7. Order Use in Order By from the block above. RelNode has no pointer to + * parent hence we need to go top down; but OB at each block really belong + * to its src/from. Hence the need to pass in sortRel for each block from + * its parent. + */ + if (sortrel != null) { + HiveSortRel hiveSort = (HiveSortRel) sortrel; + if (!hiveSort.getCollation().getFieldCollations().isEmpty()) { + ASTNode orderAst = ASTBuilder.createAST(HiveParser.TOK_ORDERBY, "TOK_ORDERBY"); + schema = new Schema((HiveSortRel) sortrel); + for (RelFieldCollation c : hiveSort.getCollation().getFieldCollations()) { + ColumnInfo cI = schema.get(c.getFieldIndex()); + /* + * The RowResolver setup for Select drops Table associations. So setup + * ASTNode on unqualified name. + */ + ASTNode astCol = ASTBuilder.unqualifiedName(cI.column); + ASTNode astNode = c.getDirection() == RelFieldCollation.Direction.ASCENDING ? 
ASTBuilder + .createAST(HiveParser.TOK_TABSORTCOLNAMEASC, "TOK_TABSORTCOLNAMEASC") : ASTBuilder + .createAST(HiveParser.TOK_TABSORTCOLNAMEDESC, "TOK_TABSORTCOLNAMEDESC"); + astNode.addChild(astCol); + orderAst.addChild(astNode); + } + hiveAST.order = orderAst; + } + RexNode limitExpr = hiveSort.getFetchExpr(); + if (limitExpr != null) { + Object val = ((RexLiteral) limitExpr).getValue2(); + hiveAST.limit = ASTBuilder.limit(val); + } + + } + + return hiveAST.getAST(); + } + + private Schema getRowSchema(String tblAlias) { + return new Schema(select, tblAlias); + } + + private QueryBlockInfo convertSource(RelNode r) { + Schema s; + ASTNode ast; + + if (r instanceof TableAccessRelBase) { + TableAccessRelBase f = (TableAccessRelBase) r; + s = new Schema(f); + ast = ASTBuilder.table(f); + } else if (r instanceof JoinRelBase) { + JoinRelBase join = (JoinRelBase) r; + QueryBlockInfo left = convertSource(join.getLeft()); + QueryBlockInfo right = convertSource(join.getRight()); + s = new Schema(left.schema, right.schema); + ASTNode cond = join.getCondition().accept(new RexVisitor(s)); + boolean semiJoin = join instanceof SemiJoinRel; + ast = ASTBuilder.join(left.ast, right.ast, join.getJoinType(), cond, semiJoin); + if (semiJoin) + s = left.schema; + } else if (r instanceof UnionRelBase) { + RelNode leftInput = ((UnionRelBase) r).getInput(0); + RelNode rightInput = ((UnionRelBase) r).getInput(1); + + ASTConverter leftConv = new ASTConverter(leftInput); + ASTConverter rightConv = new ASTConverter(rightInput); + ASTNode leftAST = leftConv.convert((SortRel) null); + ASTNode rightAST = rightConv.convert((SortRel) null); + + ASTNode unionAST = getUnionAllAST(leftAST, rightAST); + + String sqAlias = ASTConverter.nextAlias(); + ast = ASTBuilder.subQuery(unionAST, sqAlias); + s = new Schema((UnionRelBase) r, sqAlias); + } else { + ASTConverter src = new ASTConverter(r); + ASTNode srcAST = src.convert(order); + String sqAlias = ASTConverter.nextAlias(); + s = src.getRowSchema(sqAlias); + ast = ASTBuilder.subQuery(srcAST, sqAlias); + } + return new QueryBlockInfo(s, ast); + } + + class QBVisitor extends RelVisitor { + + public void handle(FilterRelBase filter) { + RelNode child = filter.getChild(); + if (child instanceof AggregateRelBase && !((AggregateRelBase) child).getGroupSet().isEmpty()) { + ASTConverter.this.having = filter; + } else { + ASTConverter.this.where = filter; + } + } + + public void handle(ProjectRelBase project) { + if (ASTConverter.this.select == null) { + ASTConverter.this.select = project; + } else { + ASTConverter.this.from = project; + } + } + + @Override + public void visit(RelNode node, int ordinal, RelNode parent) { + + if (node instanceof TableAccessRelBase) { + ASTConverter.this.from = node; + } else if (node instanceof FilterRelBase) { + handle((FilterRelBase) node); + } else if (node instanceof ProjectRelBase) { + handle((ProjectRelBase) node); + } else if (node instanceof JoinRelBase) { + ASTConverter.this.from = node; + } else if (node instanceof UnionRelBase) { + ASTConverter.this.from = node; + } else if (node instanceof AggregateRelBase) { + ASTConverter.this.groupBy = (AggregateRelBase) node; + } else if (node instanceof SortRel) { + ASTConverter.this.order = (SortRel) node; + } + /* + * once the source node is reached; stop traversal for this QB + */ + if (ASTConverter.this.from == null) { + node.childrenAccept(this); + } + } + + } + + static class RexVisitor extends RexVisitorImpl { + + private final Schema schema; + + protected RexVisitor(Schema schema) { + 
super(true); + this.schema = schema; + } + + @Override + public ASTNode visitInputRef(RexInputRef inputRef) { + ColumnInfo cI = schema.get(inputRef.getIndex()); + if (cI.agg != null) { + return (ASTNode) ParseDriver.adaptor.dupTree(cI.agg); + } + return ASTBuilder.qualifiedName(cI.table, cI.column); + } + + @Override + public ASTNode visitLiteral(RexLiteral literal) { + return ASTBuilder.literal(literal); + } + + private ASTNode getPSpecAST(RexWindow window) { + ASTNode pSpecAst = null; + + ASTNode dByAst = null; + if (window.partitionKeys != null && !window.partitionKeys.isEmpty()) { + dByAst = ASTBuilder.createAST(HiveParser.TOK_DISTRIBUTEBY, "TOK_DISTRIBUTEBY"); + for (RexNode pk : window.partitionKeys) { + ASTNode astCol = pk.accept(this); + dByAst.addChild(astCol); + } + } + + ASTNode oByAst = null; + if (window.orderKeys != null && !window.orderKeys.isEmpty()) { + oByAst = ASTBuilder.createAST(HiveParser.TOK_ORDERBY, "TOK_ORDERBY"); + for (RexFieldCollation ok : window.orderKeys) { + ASTNode astNode = ok.getDirection() == RelFieldCollation.Direction.ASCENDING ? ASTBuilder + .createAST(HiveParser.TOK_TABSORTCOLNAMEASC, "TOK_TABSORTCOLNAMEASC") : ASTBuilder + .createAST(HiveParser.TOK_TABSORTCOLNAMEDESC, "TOK_TABSORTCOLNAMEDESC"); + ASTNode astCol = ok.left.accept(this); + astNode.addChild(astCol); + oByAst.addChild(astNode); + } + } + + if (dByAst != null || oByAst != null) { + pSpecAst = ASTBuilder.createAST(HiveParser.TOK_PARTITIONINGSPEC, "TOK_PARTITIONINGSPEC"); + if (dByAst != null) + pSpecAst.addChild(dByAst); + if (oByAst != null) + pSpecAst.addChild(oByAst); + } + + return pSpecAst; + } + + private ASTNode getWindowBound(RexWindowBound wb) { + ASTNode wbAST = null; + + if (wb.isCurrentRow()) { + wbAST = ASTBuilder.createAST(HiveParser.KW_CURRENT, "CURRENT"); + } else { + if (wb.isPreceding()) + wbAST = ASTBuilder.createAST(HiveParser.KW_PRECEDING, "PRECEDING"); + else + wbAST = ASTBuilder.createAST(HiveParser.KW_FOLLOWING, "FOLLOWING"); + if (wb.isUnbounded()) { + wbAST.addChild(ASTBuilder.createAST(HiveParser.KW_UNBOUNDED, "UNBOUNDED")); + } else { + ASTNode offset = wb.getOffset().accept(this); + wbAST.addChild(offset); + } + } + + return wbAST; + } + + private ASTNode getWindowRangeAST(RexWindow window) { + ASTNode wRangeAst = null; + + ASTNode startAST = null; + RexWindowBound ub = window.getUpperBound(); + if (ub != null) { + startAST = getWindowBound(ub); + } + + ASTNode endAST = null; + RexWindowBound lb = window.getLowerBound(); + if (lb != null) { + endAST = getWindowBound(lb); + } + + if (startAST != null || endAST != null) { + // NOTE: in Hive AST Rows->Range(Physical) & Range -> Values (logical) + if (window.isRows()) + wRangeAst = ASTBuilder.createAST(HiveParser.TOK_WINDOWRANGE, "TOK_WINDOWRANGE"); + else + wRangeAst = ASTBuilder.createAST(HiveParser.TOK_WINDOWVALUES, "TOK_WINDOWVALUES"); + if (startAST != null) + wRangeAst.addChild(startAST); + if (endAST != null) + wRangeAst.addChild(endAST); + } + + return wRangeAst; + } + + @Override + public ASTNode visitOver(RexOver over) { + if (!deep) { + return null; + } + + // 1. Translate the UDAF + final ASTNode wUDAFAst = visitCall(over); + + // 2. Add TOK_WINDOW as child of UDAF + ASTNode wSpec = ASTBuilder.createAST(HiveParser.TOK_WINDOWSPEC, "TOK_WINDOWSPEC"); + wUDAFAst.addChild(wSpec); + + // 3. 
Add Part Spec & Range Spec as child of TOK_WINDOW + final RexWindow window = over.getWindow(); + final ASTNode wPSpecAst = getPSpecAST(window); + final ASTNode wRangeAst = getWindowRangeAST(window); + if (wPSpecAst != null) + wSpec.addChild(wPSpecAst); + if (wRangeAst != null) + wSpec.addChild(wRangeAst); + + return wUDAFAst; + } + + @Override + public ASTNode visitCall(RexCall call) { + if (!deep) { + return null; + } + + SqlOperator op = call.getOperator(); + List astNodeLst = new LinkedList(); + if (op.kind == SqlKind.CAST) { + HiveToken ht = TypeConverter.hiveToken(call.getType()); + ASTBuilder astBldr = ASTBuilder.construct(ht.type, ht.text); + if (ht.args != null) { + for (String castArg : ht.args) + astBldr.add(HiveParser.Identifier, castArg); + } + astNodeLst.add(astBldr.node()); + } + + for (RexNode operand : call.operands) { + astNodeLst.add(operand.accept(this)); + } + + if (isFlat(call)) + return SqlFunctionConverter.buildAST(op, astNodeLst, 0); + else + return SqlFunctionConverter.buildAST(op, astNodeLst); + } + } + + static class QueryBlockInfo { + Schema schema; + ASTNode ast; + + public QueryBlockInfo(Schema schema, ASTNode ast) { + super(); + this.schema = schema; + this.ast = ast; + } + } + + /* + * represents the schema exposed by a QueryBlock. + */ + static class Schema extends ArrayList { + + private static final long serialVersionUID = 1L; + + Schema(TableAccessRelBase scan) { + String tabName = scan.getTable().getQualifiedName().get(0); + for (RelDataTypeField field : scan.getRowType().getFieldList()) { + add(new ColumnInfo(tabName, field.getName())); + } + } + + Schema(ProjectRelBase select, String alias) { + for (RelDataTypeField field : select.getRowType().getFieldList()) { + add(new ColumnInfo(alias, field.getName())); + } + } + + Schema(UnionRelBase unionRel, String alias) { + for (RelDataTypeField field : unionRel.getRowType().getFieldList()) { + add(new ColumnInfo(alias, field.getName())); + } + } + + @SuppressWarnings("unchecked") + Schema(Schema left, Schema right) { + for (ColumnInfo cI : Iterables.concat(left, right)) { + add(cI); + } + } + + Schema(Schema src, AggregateRelBase gBy) { + for (int i : BitSets.toIter(gBy.getGroupSet())) { + ColumnInfo cI = src.get(i); + add(cI); + } + List aggs = gBy.getAggCallList(); + for (AggregateCall agg : aggs) { + int argCount = agg.getArgList().size(); + ASTBuilder b = agg.isDistinct() ? ASTBuilder.construct(HiveParser.TOK_FUNCTIONDI, + "TOK_FUNCTIONDI") : argCount == 0 ? ASTBuilder.construct(HiveParser.TOK_FUNCTIONSTAR, + "TOK_FUNCTIONSTAR") : ASTBuilder.construct(HiveParser.TOK_FUNCTION, "TOK_FUNCTION"); + b.add(HiveParser.Identifier, agg.getAggregation().getName()); + for (int i : agg.getArgList()) { + RexInputRef iRef = new RexInputRef(i, new BasicSqlType(SqlTypeName.ANY)); + b.add(iRef.accept(new RexVisitor(src))); + } + add(new ColumnInfo(null, b.node())); + } + } + + /** + * Assumption:
+   * 1. ProjectRel will always be the child of SortRel.
+   * 2. In Optiq every projection in ProjectRelBase is uniquely named
+   *    (unambiguous) without using a table qualifier (table name).
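+   * For example, an ORDER BY column is referenced here only by its
+   * unqualified projected name (see the ASTBuilder.unqualifiedName call in
+   * the ORDER BY translation above), since the RowResolver set up for the
+   * Select drops table associations.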
+ * + * @param order + * Hive Sort Rel Node + * @return Schema + */ + public Schema(HiveSortRel order) { + ProjectRelBase select = (ProjectRelBase) order.getChild(); + for (String projName : select.getRowType().getFieldNames()) { + add(new ColumnInfo(null, projName)); + } + } + } + + /* + * represents Column information exposed by a QueryBlock. + */ + static class ColumnInfo { + String table; + String column; + ASTNode agg; + + ColumnInfo(String table, String column) { + super(); + this.table = table; + this.column = column; + } + + ColumnInfo(String table, ASTNode agg) { + super(); + this.table = table; + this.agg = agg; + } + + ColumnInfo(String alias, ColumnInfo srcCol) { + this.table = alias; + this.column = srcCol.column; + this.agg = srcCol.agg; + } + } + + static String nextAlias() { + return String.format("$hdt$_%d", derivedTableCounter.getAndIncrement()); + } + + private static AtomicLong derivedTableCounter = new AtomicLong(0); + + static class HiveAST { + + ASTNode from; + ASTNode where; + ASTNode groupBy; + ASTNode having; + ASTNode select; + ASTNode order; + ASTNode limit; + + public ASTNode getAST() { + ASTBuilder b = ASTBuilder + .construct(HiveParser.TOK_QUERY, "TOK_QUERY") + .add(from) + .add( + ASTBuilder.construct(HiveParser.TOK_INSERT, "TOK_INSERT").add(ASTBuilder.destNode()) + .add(select).add(where).add(groupBy).add(having).add(order).add(limit)); + return b.node(); + } + } + + public ASTNode getUnionAllAST(ASTNode leftAST, ASTNode rightAST) { + + ASTNode unionTokAST = ASTBuilder.construct(HiveParser.TOK_UNION, "TOK_UNION").add(leftAST) + .add(rightAST).node(); + + return unionTokAST; + } + + public static boolean isFlat(RexCall call) { + boolean flat = false; + if (call.operands != null && call.operands.size() > 2) { + SqlOperator op = call.getOperator(); + if (op.getKind() == SqlKind.AND || op.getKind() == SqlKind.OR) { + flat = true; + } + } + + return flat; + } +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/DerivedTableInjector.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/DerivedTableInjector.java new file mode 100644 index 0000000..dd2bf22 --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/DerivedTableInjector.java @@ -0,0 +1,254 @@ +package org.apache.hadoop.hive.ql.optimizer.optiq.translator; + +import java.util.ArrayList; +import java.util.List; + +import org.apache.hadoop.hive.metastore.api.FieldSchema; +import org.apache.hadoop.hive.ql.optimizer.optiq.HiveOptiqUtil; +import org.apache.hadoop.hive.ql.optimizer.optiq.reloperators.HiveAggregateRel; +import org.apache.hadoop.hive.ql.optimizer.optiq.reloperators.HiveProjectRel; +import org.apache.hadoop.hive.ql.optimizer.optiq.reloperators.HiveSortRel; +import org.eigenbase.rel.AggregateRelBase; +import org.eigenbase.rel.EmptyRel; +import org.eigenbase.rel.FilterRelBase; +import org.eigenbase.rel.JoinRelBase; +import org.eigenbase.rel.OneRowRelBase; +import org.eigenbase.rel.ProjectRelBase; +import org.eigenbase.rel.RelNode; +import org.eigenbase.rel.SetOpRel; +import org.eigenbase.rel.SingleRel; +import org.eigenbase.rel.TableAccessRelBase; +import org.eigenbase.rel.TableFunctionRelBase; +import org.eigenbase.rel.UnionRelBase; +import org.eigenbase.rel.ValuesRelBase; +import org.eigenbase.rel.rules.MultiJoinRel; +import org.eigenbase.relopt.hep.HepRelVertex; +import org.eigenbase.relopt.volcano.RelSubset; +import org.eigenbase.reltype.RelDataTypeField; +import org.eigenbase.rex.RexInputRef; +import 
org.eigenbase.rex.RexNode;
+
+import com.google.common.base.Function;
+import com.google.common.collect.Lists;
+
+public class DerivedTableInjector {
+
+  public static RelNode convertOpTree(RelNode rel, List<FieldSchema> resultSchema) {
+    // Disable introducing a top-level select since Hive seems to have bugs with
+    // OB, Limit in subquery.
+    // RelNode newTopSelect = introduceTopLevelSelectInResultSchema(rel,
+    // resultSchema);
+    RelNode newTopNode = rel;
+
+    // NOTE: Hive requires Union to be buried in Project (TOK_QUERY,
+    // TOK_SUBQUERY, TOK_UNION)
+    if (newTopNode instanceof UnionRelBase) {
+      newTopNode = introduceDerivedTable(newTopNode);
+    }
+
+    convertOpTree(newTopNode, (RelNode) null);
+
+    return newTopNode;
+  }
+
+  private static void convertOpTree(RelNode rel, RelNode parent) {
+
+    if (rel instanceof EmptyRel) {
+      throw new RuntimeException("Found Empty Rel");
+    } else if (rel instanceof HepRelVertex) {
+      throw new RuntimeException("Found HepRelVertex");
+    } else if (rel instanceof JoinRelBase) {
+      if (!validJoinParent(rel, parent)) {
+        introduceDerivedTable(rel, parent);
+      }
+    } else if (rel instanceof MultiJoinRel) {
+      throw new RuntimeException("Found MultiJoinRel");
+    } else if (rel instanceof OneRowRelBase) {
+      throw new RuntimeException("Found OneRowRelBase");
+    } else if (rel instanceof RelSubset) {
+      throw new RuntimeException("Found RelSubset");
+    } else if (rel instanceof SetOpRel) {
+      // TODO: Handle more than 2 inputs for setop
+      if (!validSetopParent(rel, parent))
+        introduceDerivedTable(rel, parent);
+
+      SetOpRel setopRel = (SetOpRel) rel;
+      for (RelNode inputRel : setopRel.getInputs()) {
+        if (!validSetopChild(inputRel)) {
+          introduceDerivedTable(inputRel, setopRel);
+        }
+      }
+    } else if (rel instanceof SingleRel) {
+      if (rel instanceof FilterRelBase) {
+        if (!validFilterParent(rel, parent)) {
+          introduceDerivedTable(rel, parent);
+        }
+      } else if (rel instanceof HiveSortRel) {
+        if (!validSortParent(rel, parent)) {
+          introduceDerivedTable(rel, parent);
+        }
+        if (!validSortChild((HiveSortRel) rel)) {
+          introduceDerivedTable(((HiveSortRel) rel).getChild(), rel);
+        }
+      } else if (rel instanceof HiveAggregateRel) {
+        if (!validGBParent(rel, parent)) {
+          introduceDerivedTable(rel, parent);
+        }
+      }
+    } else if (rel instanceof TableAccessRelBase) {
+
+    } else if (rel instanceof TableFunctionRelBase) {
+
+    } else if (rel instanceof ValuesRelBase) {
+
+    }
+
+    List<RelNode> childNodes = rel.getInputs();
+    if (childNodes != null) {
+      for (RelNode r : childNodes) {
+        convertOpTree(r, rel);
+      }
+    }
+  }
+
+  private static HiveProjectRel introduceTopLevelSelectInResultSchema(final RelNode rootRel,
+      List<FieldSchema> resultSchema) {
+    RelNode curNode = rootRel;
+    HiveProjectRel rootProjRel = null;
+    while (curNode != null) {
+      if (curNode instanceof HiveProjectRel) {
+        rootProjRel = (HiveProjectRel) curNode;
+        break;
+      }
+      curNode = curNode.getInput(0);
+    }
+
+    // Assumption: tree could only be (limit)?(OB)?(ProjectRelBase)....
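+    // In other words, the plan root is assumed to be at most a Sort (limit
+    // and/or order by) on top of the first Project found by the walk above;
+    // under that assumption rootProjRel is non-null when dereferenced below.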
+    List<RexNode> rootChildExps = rootProjRel.getChildExps();
+    if (resultSchema.size() != rootChildExps.size()) {
+      throw new RuntimeException("Result Schema didn't match Optiq Optimized Op Tree Schema");
+    }
+
+    List<RexNode> newSelExps = new ArrayList<RexNode>();
+    List<String> newSelAliases = new ArrayList<String>();
+    for (int i = 0; i < rootChildExps.size(); i++) {
+      newSelExps.add(new RexInputRef(i, rootChildExps.get(i).getType()));
+      newSelAliases.add(resultSchema.get(i).getName());
+    }
+
+    return HiveProjectRel.create(rootRel, newSelExps, newSelAliases);
+  }
+
+  private static RelNode introduceDerivedTable(final RelNode rel) {
+    List<RexNode> projectList = HiveOptiqUtil.getProjsFromBelowAsInputRef(rel);
+
+    HiveProjectRel select = HiveProjectRel.create(rel.getCluster(), rel, projectList,
+        rel.getRowType(), rel.getCollationList());
+
+    return select;
+  }
+
+  private static void introduceDerivedTable(final RelNode rel, RelNode parent) {
+    int i = 0;
+    int pos = -1;
+    List<RelNode> childList = parent.getInputs();
+
+    for (RelNode child : childList) {
+      if (child == rel) {
+        pos = i;
+        break;
+      }
+      i++;
+    }
+
+    if (pos == -1) {
+      throw new RuntimeException("Couldn't find child node in parent's inputs");
+    }
+
+    RelNode select = introduceDerivedTable(rel);
+
+    parent.replaceInput(pos, select);
+  }
+
+  private static boolean validJoinParent(RelNode joinNode, RelNode parent) {
+    boolean validParent = true;
+
+    if (parent instanceof JoinRelBase) {
+      if (((JoinRelBase) parent).getRight() == joinNode) {
+        validParent = false;
+      }
+    } else if (parent instanceof SetOpRel) {
+      validParent = false;
+    }
+
+    return validParent;
+  }
+
+  private static boolean validFilterParent(RelNode filterNode, RelNode parent) {
+    boolean validParent = true;
+
+    // TODO: Verify GB having is not a separate filter (if so we shouldn't
+    // introduce a derived table)
+    if (parent instanceof FilterRelBase || parent instanceof JoinRelBase
+        || parent instanceof SetOpRel) {
+      validParent = false;
+    }
+
+    return validParent;
+  }
+
+  private static boolean validGBParent(RelNode gbNode, RelNode parent) {
+    boolean validParent = true;
+
+    // TODO: Verify GB having is not a separate filter (if so we shouldn't
+    // introduce a derived table)
+    if (parent instanceof JoinRelBase || parent instanceof SetOpRel
+        || parent instanceof AggregateRelBase
+        || (parent instanceof FilterRelBase && ((AggregateRelBase) gbNode).getGroupSet().isEmpty())) {
+      validParent = false;
+    }
+
+    return validParent;
+  }
+
+  private static boolean validSortParent(RelNode sortNode, RelNode parent) {
+    boolean validParent = true;
+
+    if (parent != null && !(parent instanceof ProjectRelBase)) {
+      validParent = false;
+    }
+
+    return validParent;
+  }
+
+  private static boolean validSortChild(HiveSortRel sortNode) {
+    boolean validChild = true;
+    RelNode child = sortNode.getChild();
+
+    if (!(child instanceof ProjectRelBase)) {
+      validChild = false;
+    }
+
+    return validChild;
+  }
+
+  private static boolean validSetopParent(RelNode setop, RelNode parent) {
+    boolean validParent = true;
+
+    if (parent != null && !(parent instanceof ProjectRelBase)) {
+      validParent = false;
+    }
+
+    return validParent;
+  }
+
+  private static boolean validSetopChild(RelNode setopChild) {
+    boolean validChild = true;
+
+    if (!(setopChild instanceof ProjectRelBase)) {
+      validChild = false;
+    }
+
+    return validChild;
+  }
+}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/ExprNodeConverter.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/ExprNodeConverter.java
new file mode 100644
index 
0000000..5636919 --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/ExprNodeConverter.java @@ -0,0 +1,154 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.ql.optimizer.optiq.translator; + +import java.util.ArrayList; +import java.util.LinkedList; +import java.util.List; + +import org.apache.hadoop.hive.common.type.HiveChar; +import org.apache.hadoop.hive.common.type.HiveVarchar; +import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc; +import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc; +import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; +import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc; +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory; +import org.eigenbase.reltype.RelDataType; +import org.eigenbase.reltype.RelDataTypeField; +import org.eigenbase.rex.RexCall; +import org.eigenbase.rex.RexInputRef; +import org.eigenbase.rex.RexLiteral; +import org.eigenbase.rex.RexNode; +import org.eigenbase.rex.RexVisitorImpl; + +/* + * convert a RexNode to an ExprNodeDesc + */ +public class ExprNodeConverter extends RexVisitorImpl { + + RelDataType rType; + String tabAlias; + boolean partitioningExpr; + + public ExprNodeConverter(String tabAlias, RelDataType rType, boolean partitioningExpr) { + super(true); + /* + * hb: 6/25/14 for now we only support expressions that only contain + * partition cols. there is no use case for supporting generic expressions. + * for supporting generic exprs., we need to give the converter information + * on whether a column is a partition column or not, whether a column is a + * virtual column or not. 
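+   * For example, a partition predicate such as ds = '2014-06-25' (where ds
+   * is a partition column of the scanned table; the name is only
+   * illustrative) can be converted back; expressions over non-partition or
+   * virtual columns are out of scope here.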
+ */ + assert partitioningExpr == true; + this.tabAlias = tabAlias; + this.rType = rType; + this.partitioningExpr = partitioningExpr; + } + + @Override + public ExprNodeDesc visitInputRef(RexInputRef inputRef) { + RelDataTypeField f = rType.getFieldList().get(inputRef.getIndex()); + return new ExprNodeColumnDesc(TypeConverter.convert(f.getType()), f.getName(), tabAlias, + partitioningExpr); + } + + @Override + public ExprNodeDesc visitCall(RexCall call) { + ExprNodeGenericFuncDesc gfDesc = null; + + if (!deep) { + return null; + } + + List args = new LinkedList(); + + for (RexNode operand : call.operands) { + args.add(operand.accept(this)); + } + + // If Expr is flat (and[p,q,r,s] or[p,q,r,s]) then recursively build the + // exprnode + if (ASTConverter.isFlat(call)) { + ArrayList tmpExprArgs = new ArrayList(); + tmpExprArgs.addAll(args.subList(0, 2)); + gfDesc = new ExprNodeGenericFuncDesc(TypeConverter.convert(call.getType()), + SqlFunctionConverter.getHiveUDF(call.getOperator(), call.getType()), tmpExprArgs); + for (int i = 2; i < call.operands.size(); i++) { + tmpExprArgs = new ArrayList(); + tmpExprArgs.add(gfDesc); + tmpExprArgs.add(args.get(i)); + gfDesc = new ExprNodeGenericFuncDesc(TypeConverter.convert(call.getType()), + SqlFunctionConverter.getHiveUDF(call.getOperator(), call.getType()), tmpExprArgs); + } + } else { + gfDesc = new ExprNodeGenericFuncDesc(TypeConverter.convert(call.getType()), + SqlFunctionConverter.getHiveUDF(call.getOperator(), call.getType()), args); + } + + return gfDesc; + } + + @Override + public ExprNodeDesc visitLiteral(RexLiteral literal) { + RelDataType lType = literal.getType(); + + switch (literal.getType().getSqlTypeName()) { + case BOOLEAN: + return new ExprNodeConstantDesc(TypeInfoFactory.booleanTypeInfo, Boolean.valueOf(RexLiteral + .booleanValue(literal))); + case TINYINT: + return new ExprNodeConstantDesc(TypeInfoFactory.byteTypeInfo, Byte.valueOf(((Number) literal + .getValue3()).byteValue())); + case SMALLINT: + return new ExprNodeConstantDesc(TypeInfoFactory.shortTypeInfo, + Short.valueOf(((Number) literal.getValue3()).shortValue())); + case INTEGER: + return new ExprNodeConstantDesc(TypeInfoFactory.intTypeInfo, + Integer.valueOf(((Number) literal.getValue3()).intValue())); + case BIGINT: + return new ExprNodeConstantDesc(TypeInfoFactory.longTypeInfo, Long.valueOf(((Number) literal + .getValue3()).longValue())); + case FLOAT: + return new ExprNodeConstantDesc(TypeInfoFactory.floatTypeInfo, + Float.valueOf(((Number) literal.getValue3()).floatValue())); + case DOUBLE: + return new ExprNodeConstantDesc(TypeInfoFactory.doubleTypeInfo, + Double.valueOf(((Number) literal.getValue3()).doubleValue())); + case DATE: + return new ExprNodeConstantDesc(TypeInfoFactory.dateTypeInfo, literal.getValue3()); + case TIMESTAMP: + return new ExprNodeConstantDesc(TypeInfoFactory.timestampTypeInfo, literal.getValue3()); + case BINARY: + return new ExprNodeConstantDesc(TypeInfoFactory.binaryTypeInfo, literal.getValue3()); + case DECIMAL: + return new ExprNodeConstantDesc(TypeInfoFactory.getDecimalTypeInfo(lType.getPrecision(), + lType.getScale()), literal.getValue3()); + case VARCHAR: + return new ExprNodeConstantDesc(TypeInfoFactory.getVarcharTypeInfo(lType.getPrecision()), + new HiveVarchar((String) literal.getValue3(), lType.getPrecision())); + case CHAR: + return new ExprNodeConstantDesc(TypeInfoFactory.getCharTypeInfo(lType.getPrecision()), + new HiveChar((String) literal.getValue3(), lType.getPrecision())); + case OTHER: + default: + return new 
ExprNodeConstantDesc(TypeInfoFactory.voidTypeInfo, literal.getValue3()); + } + } + +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/RelNodeConverter.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/RelNodeConverter.java new file mode 100644 index 0000000..c051b65 --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/RelNodeConverter.java @@ -0,0 +1,697 @@ +package org.apache.hadoop.hive.ql.optimizer.optiq.translator; + +import java.math.BigDecimal; +import java.util.ArrayList; +import java.util.BitSet; +import java.util.HashMap; +import java.util.LinkedHashMap; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Set; +import java.util.Stack; + +import org.apache.hadoop.hive.metastore.api.FieldSchema; +import org.apache.hadoop.hive.ql.exec.ColumnInfo; +import org.apache.hadoop.hive.ql.exec.FilterOperator; +import org.apache.hadoop.hive.ql.exec.GroupByOperator; +import org.apache.hadoop.hive.ql.exec.JoinOperator; +import org.apache.hadoop.hive.ql.exec.LimitOperator; +import org.apache.hadoop.hive.ql.exec.Operator; +import org.apache.hadoop.hive.ql.exec.ReduceSinkOperator; +import org.apache.hadoop.hive.ql.exec.RowSchema; +import org.apache.hadoop.hive.ql.exec.SelectOperator; +import org.apache.hadoop.hive.ql.exec.TableScanOperator; +import org.apache.hadoop.hive.ql.lib.DefaultRuleDispatcher; +import org.apache.hadoop.hive.ql.lib.Dispatcher; +import org.apache.hadoop.hive.ql.lib.ForwardWalker; +import org.apache.hadoop.hive.ql.lib.GraphWalker; +import org.apache.hadoop.hive.ql.lib.Node; +import org.apache.hadoop.hive.ql.lib.NodeProcessor; +import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx; +import org.apache.hadoop.hive.ql.lib.Rule; +import org.apache.hadoop.hive.ql.lib.RuleRegExp; +import org.apache.hadoop.hive.ql.metadata.Table; +import org.apache.hadoop.hive.ql.optimizer.optiq.RelOptHiveTable; +import org.apache.hadoop.hive.ql.optimizer.optiq.reloperators.HiveAggregateRel; +import org.apache.hadoop.hive.ql.optimizer.optiq.reloperators.HiveFilterRel; +import org.apache.hadoop.hive.ql.optimizer.optiq.reloperators.HiveJoinRel; +import org.apache.hadoop.hive.ql.optimizer.optiq.reloperators.HiveProjectRel; +import org.apache.hadoop.hive.ql.optimizer.optiq.reloperators.HiveRel; +import org.apache.hadoop.hive.ql.optimizer.optiq.reloperators.HiveSortRel; +import org.apache.hadoop.hive.ql.optimizer.optiq.reloperators.HiveTableScanRel; +import org.apache.hadoop.hive.ql.parse.ParseContext; +import org.apache.hadoop.hive.ql.parse.PrunedPartitionList; +import org.apache.hadoop.hive.ql.parse.QBJoinTree; +import org.apache.hadoop.hive.ql.parse.RowResolver; +import org.apache.hadoop.hive.ql.parse.SemanticAnalyzer; +import org.apache.hadoop.hive.ql.parse.SemanticException; +import org.apache.hadoop.hive.ql.plan.AggregationDesc; +import org.apache.hadoop.hive.ql.plan.ColStatistics; +import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc; +import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; +import org.apache.hadoop.hive.ql.plan.JoinCondDesc; +import org.apache.hadoop.hive.ql.plan.JoinDesc; +import org.apache.hadoop.hive.ql.plan.OperatorDesc; +import org.apache.hadoop.hive.ql.plan.ReduceSinkDesc; +import org.apache.hadoop.hive.ql.plan.Statistics; +import org.eigenbase.rel.AggregateCall; +import org.eigenbase.rel.Aggregation; +import org.eigenbase.rel.InvalidRelException; +import org.eigenbase.rel.JoinRelType; +import 
org.eigenbase.rel.RelCollation; +import org.eigenbase.rel.RelCollationImpl; +import org.eigenbase.rel.RelFieldCollation; +import org.eigenbase.rel.RelNode; +import org.eigenbase.rel.TableAccessRelBase; +import org.eigenbase.relopt.RelOptCluster; +import org.eigenbase.relopt.RelOptSchema; +import org.eigenbase.relopt.RelTraitSet; +import org.eigenbase.reltype.RelDataType; +import org.eigenbase.reltype.RelDataTypeField; +import org.eigenbase.rex.RexCall; +import org.eigenbase.rex.RexInputRef; +import org.eigenbase.rex.RexNode; +import org.eigenbase.rex.RexUtil; +import org.eigenbase.sql.fun.SqlStdOperatorTable; +import org.eigenbase.util.CompositeList; +import org.eigenbase.util.Pair; + +import com.google.common.base.Function; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.Lists; + +public class RelNodeConverter { + private static final Map AGG_MAP = ImmutableMap + . builder() + .put( + "count", + SqlStdOperatorTable.COUNT) + .put("sum", SqlStdOperatorTable.SUM) + .put("min", SqlStdOperatorTable.MIN) + .put("max", SqlStdOperatorTable.MAX) + .put("avg", SqlStdOperatorTable.AVG) + .build(); + + public static RelNode convert(Operator sinkOp, RelOptCluster cluster, + RelOptSchema schema, SemanticAnalyzer sA, ParseContext pCtx) { + + Context ctx = new Context(cluster, schema, sA, pCtx); + + Map rules = ImmutableMap + . builder() + .put(new RuleRegExp("R1", TableScanOperator.getOperatorName() + "%"), + new TableScanProcessor()) + .put(new RuleRegExp("R2", FilterOperator.getOperatorName() + "%"), new FilterProcessor()) + .put(new RuleRegExp("R3", SelectOperator.getOperatorName() + "%"), new SelectProcessor()) + .put(new RuleRegExp("R4", JoinOperator.getOperatorName() + "%"), new JoinProcessor()) + .put(new RuleRegExp("R5", LimitOperator.getOperatorName() + "%"), new LimitProcessor()) + .put(new RuleRegExp("R6", GroupByOperator.getOperatorName() + "%"), new GroupByProcessor()) + .put(new RuleRegExp("R7", ReduceSinkOperator.getOperatorName() + "%"), + new ReduceSinkProcessor()).build(); + + Dispatcher disp = new DefaultRuleDispatcher(new DefaultProcessor(), rules, ctx); + GraphWalker egw = new ForwardWalker(disp); + + ArrayList topNodes = new ArrayList(); + topNodes.addAll(pCtx.getTopOps().values()); + + HashMap outputMap = new HashMap(); + try { + egw.startWalking(topNodes, outputMap); + } catch (SemanticException se) { + // @revisit + throw new RuntimeException(se); + } + return (HiveRel) outputMap.get(sinkOp); + } + + static class Context implements NodeProcessorCtx { + RelOptCluster cluster; + RelOptSchema schema; + SemanticAnalyzer sA; + ParseContext parseCtx; + /* + * A Map from hive column internalNames to Optiq positions. A separate map + * for each Operator. + */ + Map> opPositionMap; + + Map, RelNode> hiveOpToRelNode; + + public Context(RelOptCluster cluster, RelOptSchema schema, SemanticAnalyzer sA, + ParseContext parseCtx) { + super(); + this.cluster = cluster; + this.schema = schema; + this.sA = sA; + this.parseCtx = parseCtx; + opPositionMap = new HashMap>(); + hiveOpToRelNode = new HashMap, RelNode>(); + } + + void buildColumnMap(Operator op, RelNode rNode) { + RowSchema rr = op.getSchema(); + ImmutableMap.Builder b = new ImmutableMap.Builder(); + int i = 0; + for (ColumnInfo ci : rr.getSignature()) { + b.put(ci.getInternalName(), i); + i++; + } + opPositionMap.put(rNode, b.build()); + } + + /* + * Why special handling for TableScan? 
- the RowResolver coming from hive + * for TScan still has all the columns, whereas the Optiq type we build is + * based on the needed columns in the TScanOp. + */ + void buildColumnMap(TableScanOperator tsOp, RelNode rNode) { + RelDataType oType = rNode.getRowType(); + int i = 0; + ImmutableMap.Builder b = new ImmutableMap.Builder(); + for (String fN : oType.getFieldNames()) { + b.put(fN, i); + i++; + } + opPositionMap.put(rNode, b.build()); + } + + Map reducerMap(Map inpMap, ReduceSinkOperator rsOp) { + ImmutableMap.Builder b = new ImmutableMap.Builder(); + Map colExprMap = rsOp.getColumnExprMap(); + for (Map.Entry e : colExprMap.entrySet()) { + String inpCol = ((ExprNodeColumnDesc) e.getValue()).getColumn(); + b.put(e.getKey(), inpMap.get(inpCol)); + } + return b.build(); + } + + /* + * The Optiq JoinRel datatype is formed by combining the columns from its + * input RelNodes. Whereas the Hive RowResolver of the JoinOp contains only + * the columns needed by childOps. + */ + void buildColumnMap(JoinOperator jOp, HiveJoinRel jRel) throws SemanticException { + RowResolver rr = sA.getRowResolver(jOp); + QBJoinTree hTree = parseCtx.getJoinContext().get(jOp); + Map leftMap = opPositionMap.get(jRel.getLeft()); + Map rightMap = opPositionMap.get(jRel.getRight()); + leftMap = reducerMap(leftMap, (ReduceSinkOperator) jOp.getParentOperators().get(0)); + rightMap = reducerMap(rightMap, (ReduceSinkOperator) jOp.getParentOperators().get(1)); + int leftColCount = jRel.getLeft().getRowType().getFieldCount(); + ImmutableMap.Builder b = new ImmutableMap.Builder(); + for (Map.Entry> tableEntry : rr.getRslvMap() + .entrySet()) { + String table = tableEntry.getKey(); + LinkedHashMap cols = tableEntry.getValue(); + Map posMap = leftMap; + int offset = 0; + if (hTree.getRightAliases() != null) { + for (String rAlias : hTree.getRightAliases()) { + if (table.equals(rAlias)) { + posMap = rightMap; + offset = leftColCount; + break; + } + } + } + for (Map.Entry colEntry : cols.entrySet()) { + ColumnInfo ci = colEntry.getValue(); + ExprNodeDesc e = jOp.getColumnExprMap().get(ci.getInternalName()); + String cName = ((ExprNodeColumnDesc) e).getColumn(); + int pos = posMap.get(cName); + + b.put(ci.getInternalName(), pos + offset); + } + } + opPositionMap.put(jRel, b.build()); + } + + void propagatePosMap(RelNode node, RelNode parent) { + opPositionMap.put(node, opPositionMap.get(parent)); + } + + RexNode convertToOptiqExpr(final ExprNodeDesc expr, final RelNode optiqOP, final boolean flatten) + throws SemanticException { + return convertToOptiqExpr(expr, optiqOP, 0, flatten); + } + + RexNode convertToOptiqExpr(final ExprNodeDesc expr, final RelNode optiqOP, int offset, + final boolean flatten) throws SemanticException { + ImmutableMap posMap = opPositionMap.get(optiqOP); + RexNodeConverter c = new RexNodeConverter(cluster, optiqOP.getRowType(), posMap, offset, + flatten); + return c.convert(expr); + } + + RelNode getParentNode(Operator hiveOp, int i) { + Operator p = hiveOp.getParentOperators().get(i); + return p == null ? null : hiveOpToRelNode.get(p); + } + + } + + static class JoinProcessor implements NodeProcessor { + @Override + @SuppressWarnings("unchecked") + public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, + Object... 
nodeOutputs) throws SemanticException { + Context ctx = (Context) procCtx; + HiveRel left = (HiveRel) ctx.getParentNode((Operator) nd, 0); + HiveRel right = (HiveRel) ctx.getParentNode((Operator) nd, 1); + JoinOperator joinOp = (JoinOperator) nd; + JoinCondDesc[] jConds = joinOp.getConf().getConds(); + assert jConds.length == 1; + HiveJoinRel joinRel = convertJoinOp(ctx, joinOp, jConds[0], left, right); + ctx.buildColumnMap(joinOp, joinRel); + ctx.hiveOpToRelNode.put(joinOp, joinRel); + return joinRel; + } + + /* + * @todo: cleanup, for now just copied from HiveToOptiqRelConvereter + */ + private HiveJoinRel convertJoinOp(Context ctx, JoinOperator op, JoinCondDesc jc, + HiveRel leftRel, HiveRel rightRel) throws SemanticException { + HiveJoinRel joinRel; + Operator leftParent = op.getParentOperators().get(jc.getLeft()); + Operator rightParent = op.getParentOperators().get(jc.getRight()); + + if (leftParent instanceof ReduceSinkOperator && rightParent instanceof ReduceSinkOperator) { + List leftCols = ((ReduceSinkDesc) (leftParent.getConf())).getKeyCols(); + List rightCols = ((ReduceSinkDesc) (rightParent.getConf())).getKeyCols(); + RexNode joinPredicate = null; + JoinRelType joinType = JoinRelType.INNER; + int rightColOffSet = leftRel.getRowType().getFieldCount(); + + // TODO: what about semi join + switch (jc.getType()) { + case JoinDesc.INNER_JOIN: + joinType = JoinRelType.INNER; + break; + case JoinDesc.LEFT_OUTER_JOIN: + joinType = JoinRelType.LEFT; + break; + case JoinDesc.RIGHT_OUTER_JOIN: + joinType = JoinRelType.RIGHT; + break; + case JoinDesc.FULL_OUTER_JOIN: + joinType = JoinRelType.FULL; + break; + } + + int i = 0; + for (ExprNodeDesc expr : leftCols) { + List eqExpr = new LinkedList(); + eqExpr.add(ctx.convertToOptiqExpr(expr, leftRel, 0, false)); + eqExpr.add(ctx.convertToOptiqExpr(rightCols.get(i), rightRel, rightColOffSet, false)); + + RexNode eqOp = ctx.cluster.getRexBuilder().makeCall(SqlStdOperatorTable.EQUALS, eqExpr); + i++; + + if (joinPredicate == null) { + joinPredicate = eqOp; + } else { + List conjElements = new LinkedList(); + conjElements.add(joinPredicate); + conjElements.add(eqOp); + joinPredicate = ctx.cluster.getRexBuilder().makeCall(SqlStdOperatorTable.AND, + conjElements); + } + } + + // Translate non-joinkey predicate + Set>> filterExprSet = op.getConf().getFilters().entrySet(); + if (!filterExprSet.isEmpty()) { + RexNode eqExpr; + int colOffSet; + RelNode childRel; + Operator parentHiveOp; + int inputId; + + for (Entry> entry : filterExprSet) { + inputId = entry.getKey().intValue(); + if (inputId == 0) { + colOffSet = 0; + childRel = leftRel; + parentHiveOp = leftParent; + } else if (inputId == 1) { + colOffSet = rightColOffSet; + childRel = rightRel; + parentHiveOp = rightParent; + } else { + throw new RuntimeException("Invalid Join Input"); + } + + for (ExprNodeDesc expr : entry.getValue()) { + eqExpr = ctx.convertToOptiqExpr(expr, childRel, colOffSet, false); + List conjElements = new LinkedList(); + conjElements.add(joinPredicate); + conjElements.add(eqExpr); + joinPredicate = ctx.cluster.getRexBuilder().makeCall(SqlStdOperatorTable.AND, + conjElements); + } + } + } + + joinRel = HiveJoinRel.getJoin(ctx.cluster, leftRel, rightRel, joinPredicate, joinType, + false); + } else { + throw new RuntimeException("Right & Left of Join Condition columns are not equal"); + } + + return joinRel; + } + + } + + private static int convertExpr(Context ctx, RelNode input, ExprNodeDesc expr, + List extraExprs) throws SemanticException { + final RexNode rex = 
ctx.convertToOptiqExpr(expr, input, false); + final int index; + if (rex instanceof RexInputRef) { + index = ((RexInputRef) rex).getIndex(); + } else { + index = input.getRowType().getFieldCount() + extraExprs.size(); + extraExprs.add(rex); + } + return index; + } + + private static AggregateCall convertAgg(Context ctx, AggregationDesc agg, RelNode input, + ColumnInfo cI, List extraExprs) throws SemanticException { + final Aggregation aggregation = AGG_MAP.get(agg.getGenericUDAFName()); + if (aggregation == null) { + throw new AssertionError("agg not found: " + agg.getGenericUDAFName()); + } + + List argList = new ArrayList(); + RelDataType type = TypeConverter.convert(cI.getType(), ctx.cluster.getTypeFactory()); + if (aggregation.equals(SqlStdOperatorTable.AVG)) { + type = type.getField("sum", false).getType(); + } + for (ExprNodeDesc expr : agg.getParameters()) { + int index = convertExpr(ctx, input, expr, extraExprs); + argList.add(index); + } + + /* + * set the type to the first arg, it there is one; because the RTi set on + * Aggregation call assumes this is the output type. + */ + if (argList.size() > 0) { + RexNode rex = ctx.convertToOptiqExpr(agg.getParameters().get(0), input, false); + type = rex.getType(); + } + return new AggregateCall(aggregation, agg.getDistinct(), argList, type, null); + } + + static class FilterProcessor implements NodeProcessor { + @Override + @SuppressWarnings("unchecked") + public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, + Object... nodeOutputs) throws SemanticException { + Context ctx = (Context) procCtx; + HiveRel input = (HiveRel) ctx.getParentNode((Operator) nd, 0); + FilterOperator filterOp = (FilterOperator) nd; + RexNode convertedFilterExpr = ctx.convertToOptiqExpr(filterOp.getConf().getPredicate(), + input, true); + + // Flatten the condition otherwise Optiq chokes on assertion + // (FilterRelBase) + if (convertedFilterExpr instanceof RexCall) { + RexCall call = (RexCall) convertedFilterExpr; + convertedFilterExpr = ctx.cluster.getRexBuilder().makeCall(call.getType(), + call.getOperator(), RexUtil.flatten(call.getOperands(), call.getOperator())); + } + + HiveRel filtRel = new HiveFilterRel(ctx.cluster, ctx.cluster.traitSetOf(HiveRel.CONVENTION), + input, convertedFilterExpr); + ctx.propagatePosMap(filtRel, input); + ctx.hiveOpToRelNode.put(filterOp, filtRel); + return filtRel; + } + } + + static class SelectProcessor implements NodeProcessor { + @Override + @SuppressWarnings("unchecked") + public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, + Object... nodeOutputs) throws SemanticException { + Context ctx = (Context) procCtx; + HiveRel inputRelNode = (HiveRel) ctx.getParentNode((Operator) nd, 0); + SelectOperator selectOp = (SelectOperator) nd; + + List colLst = selectOp.getConf().getColList(); + List optiqColLst = new LinkedList(); + + for (ExprNodeDesc colExpr : colLst) { + optiqColLst.add(ctx.convertToOptiqExpr(colExpr, inputRelNode, false)); + } + + /* + * Hive treats names that start with '_c' as internalNames; so change the + * names so we don't run into this issue when converting back to Hive AST. 
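+       * For example, an output column named "_c0" is emitted as "_o__c0" by
+       * the transform below.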
+ */ + List oFieldNames = Lists.transform(selectOp.getConf().getOutputColumnNames(), + new Function() { + @Override + public String apply(String hName) { + return "_o_" + hName; + } + }); + + HiveRel selRel = HiveProjectRel.create(inputRelNode, optiqColLst, oFieldNames); + ctx.buildColumnMap(selectOp, selRel); + ctx.hiveOpToRelNode.put(selectOp, selRel); + return selRel; + } + } + + static class LimitProcessor implements NodeProcessor { + @Override + public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, + Object... nodeOutputs) throws SemanticException { + Context ctx = (Context) procCtx; + HiveRel input = (HiveRel) ctx.getParentNode((Operator) nd, 0); + LimitOperator limitOp = (LimitOperator) nd; + + // in Optiq, a limit is represented as a sort on 0 columns + final RexNode fetch; + if (limitOp.getConf().getLimit() >= 0) { + fetch = ctx.cluster.getRexBuilder().makeExactLiteral( + BigDecimal.valueOf(limitOp.getConf().getLimit())); + } else { + fetch = null; + } + RelTraitSet traitSet = ctx.cluster.traitSetOf(HiveRel.CONVENTION); + RelCollation canonizedCollation = traitSet.canonize(RelCollationImpl.EMPTY); + HiveRel sortRel = new HiveSortRel(ctx.cluster, traitSet, input, canonizedCollation, null, + fetch); + ctx.propagatePosMap(sortRel, input); + ctx.hiveOpToRelNode.put(limitOp, sortRel); + return sortRel; + } + } + + static class GroupByProcessor implements NodeProcessor { + @Override + public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, + Object... nodeOutputs) throws SemanticException { + Context ctx = (Context) procCtx; + + HiveRel input = (HiveRel) ctx.getParentNode((Operator) nd, 0); + GroupByOperator groupByOp = (GroupByOperator) nd; + RowResolver rr = ctx.sA.getRowResolver(groupByOp); + ArrayList signature = rr.getRowSchema().getSignature(); + + // GroupBy is represented by two operators, one map side and one reduce + // side. We only translate the map-side one. + if (groupByOp.getParentOperators().get(0) instanceof ReduceSinkOperator) { + ctx.hiveOpToRelNode.put(groupByOp, input); + return input; + } + + final List extraExprs = Lists.newArrayList(); + final BitSet groupSet = new BitSet(); + for (ExprNodeDesc key : groupByOp.getConf().getKeys()) { + int index = convertExpr(ctx, input, key, extraExprs); + groupSet.set(index); + } + List aggregateCalls = Lists.newArrayList(); + int i = groupByOp.getConf().getKeys().size(); + for (AggregationDesc agg : groupByOp.getConf().getAggregators()) { + aggregateCalls.add(convertAgg(ctx, agg, input, signature.get(i++), extraExprs)); + } + + if (!extraExprs.isEmpty()) { + // noinspection unchecked + input = HiveProjectRel.create(input, CompositeList.of(Lists.transform(input.getRowType() + .getFieldList(), new Function() { + @Override + public RexNode apply(RelDataTypeField input) { + return new RexInputRef(input.getIndex(), input.getType()); + } + }), extraExprs), null); + } + try { + HiveRel aggregateRel = new HiveAggregateRel(ctx.cluster, + ctx.cluster.traitSetOf(HiveRel.CONVENTION), input, groupSet, aggregateCalls); + ctx.buildColumnMap(groupByOp, aggregateRel); + ctx.hiveOpToRelNode.put(groupByOp, aggregateRel); + return aggregateRel; + } catch (InvalidRelException e) { + throw new AssertionError(e); // not possible + } + } + } + + static class ReduceSinkProcessor implements NodeProcessor { + @Override + public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, + Object... 
nodeOutputs) throws SemanticException { + Context ctx = (Context) procCtx; + HiveRel input = (HiveRel) ctx.getParentNode((Operator) nd, 0); + ReduceSinkOperator sinkOp = (ReduceSinkOperator) nd; + + // It is a sort reducer if and only if the number of reducers is 1. + final ReduceSinkDesc conf = sinkOp.getConf(); + if (conf.getNumReducers() != 1) { + Operator op = (Operator) nd; + ctx.hiveOpToRelNode.put(op, input); + return input; + } + + final String order = conf.getOrder(); // "+-" means "ASC, DESC" + assert order.length() == conf.getKeyCols().size(); + + /* + * numReducers == 1 and order.length = 1 => a RS for CrossJoin. + */ + if (order.length() == 0) { + Operator op = (Operator) nd; + ctx.hiveOpToRelNode.put(op, input); + return input; + } + + final List fieldCollations = Lists.newArrayList(); + final List extraExprs = Lists.newArrayList(); + for (Pair pair : Pair.zip(conf.getKeyCols(), + Lists.charactersOf(order))) { + int index = convertExpr(ctx, input, pair.left, extraExprs); + RelFieldCollation.Direction direction = getDirection(pair.right); + fieldCollations.add(new RelFieldCollation(index, direction)); + } + + if (!extraExprs.isEmpty()) { + // noinspection unchecked + input = HiveProjectRel.create(input, CompositeList.of(Lists.transform(input.getRowType() + .getFieldList(), new Function() { + @Override + public RexNode apply(RelDataTypeField input) { + return new RexInputRef(input.getIndex(), input.getType()); + } + }), extraExprs), null); + } + + RelTraitSet traitSet = ctx.cluster.traitSetOf(HiveRel.CONVENTION); + RelCollation canonizedCollation = traitSet.canonize(RelCollationImpl.of(fieldCollations)); + HiveRel sortRel = new HiveSortRel(ctx.cluster, traitSet, input, canonizedCollation, null, + null); + ctx.propagatePosMap(sortRel, input); + ctx.hiveOpToRelNode.put(sinkOp, sortRel); + + // REVIEW: Do we need to remove the columns we added due to extraExprs? + + return sortRel; + } + + private RelFieldCollation.Direction getDirection(char c) { + switch (c) { + case '+': + return RelFieldCollation.Direction.ASCENDING; + case '-': + return RelFieldCollation.Direction.DESCENDING; + default: + throw new AssertionError("unexpected direction " + c); + } + } + } + + static class TableScanProcessor implements NodeProcessor { + @Override + public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, + Object... 
nodeOutputs) throws SemanticException { + Context ctx = (Context) procCtx; + TableScanOperator tableScanOp = (TableScanOperator) nd; + RowResolver rr = ctx.sA.getRowResolver(tableScanOp); + + List neededCols = new ArrayList(tableScanOp.getNeededColumns()); + Statistics stats = tableScanOp.getStatistics(); + + try { + stats = addPartitionColumns(ctx, tableScanOp, tableScanOp.getConf().getAlias(), + ctx.sA.getTable(tableScanOp), stats, neededCols); + } catch (CloneNotSupportedException ce) { + throw new SemanticException(ce); + } + + if (stats.getColumnStats().size() != neededCols.size()) { + throw new SemanticException("Incomplete Col stats for table: " + + tableScanOp.getConf().getAlias()); + } + RelDataType rowType = TypeConverter.getType(ctx.cluster, rr, neededCols); + RelOptHiveTable optTable = new RelOptHiveTable(ctx.schema, tableScanOp.getConf().getAlias(), + rowType, ctx.sA.getTable(tableScanOp), null, null, null, null, null); + TableAccessRelBase tableRel = new HiveTableScanRel(ctx.cluster, + ctx.cluster.traitSetOf(HiveRel.CONVENTION), optTable, rowType); + ctx.buildColumnMap(tableScanOp, tableRel); + ctx.hiveOpToRelNode.put(tableScanOp, tableRel); + return tableRel; + } + + /* + * Add partition columns to needed columns and fake the COlStats for it. + */ + private Statistics addPartitionColumns(Context ctx, TableScanOperator tableScanOp, + String tblAlias, Table tbl, Statistics stats, List neededCols) + throws CloneNotSupportedException { + if (!tbl.isPartitioned()) { + return stats; + } + List pStats = new ArrayList(); + List pCols = tbl.getPartCols(); + for (FieldSchema pC : pCols) { + neededCols.add(pC.getName()); + ColStatistics cStats = stats.getColumnStatisticsForColumn(tblAlias, pC.getName()); + if (cStats == null) { + PrunedPartitionList partList = ctx.parseCtx.getOpToPartList().get(tableScanOp); + cStats = new ColStatistics(tblAlias, pC.getName(), pC.getType()); + cStats.setCountDistint(partList.getPartitions().size()); + pStats.add(cStats); + } + } + if (pStats.size() > 0) { + stats = stats.clone(); + stats.addToColumnStats(pStats); + } + + return stats; + } + } + + static class DefaultProcessor implements NodeProcessor { + @Override + public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, + Object... 
nodeOutputs) throws SemanticException { + @SuppressWarnings("unchecked") + Operator op = (Operator) nd; + Context ctx = (Context) procCtx; + RelNode node = ctx.getParentNode(op, 0); + ctx.hiveOpToRelNode.put(op, node); + return node; + } + } +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/RexNodeConverter.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/RexNodeConverter.java new file mode 100644 index 0000000..49240b1 --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/RexNodeConverter.java @@ -0,0 +1,331 @@ +package org.apache.hadoop.hive.ql.optimizer.optiq.translator; + +import java.math.BigDecimal; +import java.util.ArrayList; +import java.util.Calendar; +import java.util.Date; +import java.util.GregorianCalendar; +import java.util.LinkedHashMap; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; + +import org.apache.hadoop.hive.common.type.Decimal128; +import org.apache.hadoop.hive.common.type.HiveChar; +import org.apache.hadoop.hive.common.type.HiveDecimal; +import org.apache.hadoop.hive.common.type.HiveVarchar; +import org.apache.hadoop.hive.ql.exec.FunctionRegistry; +import org.apache.hadoop.hive.ql.parse.ParseUtils; +import org.apache.hadoop.hive.ql.parse.RowResolver; +import org.apache.hadoop.hive.ql.parse.SemanticException; +import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc; +import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc; +import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; +import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc; +import org.apache.hadoop.hive.ql.plan.ExprNodeNullDesc; +import org.apache.hadoop.hive.ql.udf.generic.GenericUDF; +import org.apache.hadoop.hive.ql.udf.generic.GenericUDFBaseCompare; +import org.apache.hadoop.hive.ql.udf.generic.GenericUDFBaseNumeric; +import org.apache.hadoop.hive.ql.udf.generic.GenericUDFBridge; +import org.apache.hadoop.hive.ql.udf.generic.GenericUDFTimestamp; +import org.apache.hadoop.hive.ql.udf.generic.GenericUDFToBinary; +import org.apache.hadoop.hive.ql.udf.generic.GenericUDFToChar; +import org.apache.hadoop.hive.ql.udf.generic.GenericUDFToDate; +import org.apache.hadoop.hive.ql.udf.generic.GenericUDFToDecimal; +import org.apache.hadoop.hive.ql.udf.generic.GenericUDFToUnixTimeStamp; +import org.apache.hadoop.hive.ql.udf.generic.GenericUDFToVarchar; +import org.apache.hadoop.hive.serde2.objectinspector.ConstantObjectInspector; +import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils; +import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector.PrimitiveCategory; +import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo; +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils; +import org.eigenbase.rel.RelNode; +import org.eigenbase.relopt.RelOptCluster; +import org.eigenbase.reltype.RelDataType; +import org.eigenbase.reltype.RelDataTypeFactory; +import org.eigenbase.rex.RexBuilder; +import org.eigenbase.rex.RexCall; +import org.eigenbase.rex.RexNode; +import org.eigenbase.rex.RexUtil; +import org.eigenbase.sql.SqlOperator; +import org.eigenbase.sql.fun.SqlCastFunction; + +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableList.Builder; +import com.google.common.collect.ImmutableMap; + +public class RexNodeConverter { + + private static class InputCtx { + private final RelDataType m_optiqInpDataType; + private final ImmutableMap m_hiveNameToPosMap; 
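+    // The RowResolver is only needed for the multi-input (join condition)
+    // case; single-input conversions resolve columns through
+    // m_hiveNameToPosMap alone.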
+ private final RowResolver m_hiveRR; + private final int m_offsetInOptiqSchema; + + private InputCtx(RelDataType optiqInpDataType, ImmutableMap hiveNameToPosMap, + RowResolver hiveRR, int offsetInOptiqSchema) { + m_optiqInpDataType = optiqInpDataType; + m_hiveNameToPosMap = hiveNameToPosMap; + m_hiveRR = hiveRR; + m_offsetInOptiqSchema = offsetInOptiqSchema; + } + }; + + private final RelOptCluster m_cluster; + private final ImmutableList m_inputCtxs; + private final boolean m_flattenExpr; + + public RexNodeConverter(RelOptCluster cluster, RelDataType inpDataType, + ImmutableMap nameToPosMap, int offset, boolean flattenExpr) { + this.m_cluster = cluster; + m_inputCtxs = ImmutableList.of(new InputCtx(inpDataType, nameToPosMap, null, offset)); + m_flattenExpr = flattenExpr; + } + + public RexNodeConverter(RelOptCluster cluster, List inpCtxLst, boolean flattenExpr) { + this.m_cluster = cluster; + m_inputCtxs = ImmutableList. builder().addAll(inpCtxLst).build(); + m_flattenExpr = flattenExpr; + } + + public RexNode convert(ExprNodeDesc expr) throws SemanticException { + if (expr instanceof ExprNodeNullDesc) { + return m_cluster.getRexBuilder().makeNullLiteral( + TypeConverter.convert(expr.getTypeInfo(), m_cluster.getRexBuilder().getTypeFactory()) + .getSqlTypeName()); + } + if (expr instanceof ExprNodeGenericFuncDesc) { + return convert((ExprNodeGenericFuncDesc) expr); + } else if (expr instanceof ExprNodeConstantDesc) { + return convert((ExprNodeConstantDesc) expr); + } else if (expr instanceof ExprNodeColumnDesc) { + return convert((ExprNodeColumnDesc) expr); + } else { + throw new RuntimeException("Unsupported Expression"); + } + // TODO: handle a) ExprNodeFieldDesc b) ExprNodeColumnListDesc + } + + private RexNode convert(final ExprNodeGenericFuncDesc func) throws SemanticException { + ExprNodeDesc tmpExprNode; + RexNode tmpRN; + TypeInfo tgtDT = null; + + List childRexNodeLst = new LinkedList(); + Builder argTypeBldr = ImmutableList. builder(); + + // TODO: 1) Expand to other functions as needed 2) What about types + // other + // than primitive + if (func.getGenericUDF() instanceof GenericUDFBaseNumeric) { + tgtDT = func.getTypeInfo(); + } else if (func.getGenericUDF() instanceof GenericUDFBaseCompare) { + if (func.getChildren().size() == 2) { + tgtDT = FunctionRegistry.getCommonClassForComparison(func.getChildren().get(0) + .getTypeInfo(), func.getChildren().get(1).getTypeInfo()); + } + } + + for (ExprNodeDesc childExpr : func.getChildren()) { + tmpExprNode = childExpr; + if (tgtDT != null + && TypeInfoUtils.isConversionRequiredForComparison(tgtDT, childExpr.getTypeInfo())) { + tmpExprNode = ParseUtils.createConversionCast(childExpr, (PrimitiveTypeInfo) tgtDT); + } + argTypeBldr.add(TypeConverter.convert(tmpExprNode.getTypeInfo(), m_cluster.getTypeFactory())); + tmpRN = convert(tmpExprNode); + childRexNodeLst.add(tmpRN); + } + + // This is an explicit cast + RexNode expr = null; + RelDataType retType = null; + expr = handleExplicitCast(func, childRexNodeLst); + + if (expr == null) { + retType = (expr != null) ? 
expr.getType() : TypeConverter.convert(func.getTypeInfo(), + m_cluster.getTypeFactory()); + SqlOperator optiqOp = SqlFunctionConverter.getOptiqOperator(func.getGenericUDF(), + argTypeBldr.build(), retType); + expr = m_cluster.getRexBuilder().makeCall(optiqOp, childRexNodeLst); + } else { + retType = expr.getType(); + } + + // TODO: Cast Function in Optiq have a bug where it infertype on cast throws + // an exception + if (m_flattenExpr && (expr instanceof RexCall) + && !(((RexCall) expr).getOperator() instanceof SqlCastFunction)) { + RexCall call = (RexCall) expr; + expr = m_cluster.getRexBuilder().makeCall(retType, call.getOperator(), + RexUtil.flatten(call.getOperands(), call.getOperator())); + } + + return expr; + } + + private boolean castExprUsingUDFBridge(GenericUDF gUDF) { + boolean castExpr = false; + if (gUDF != null && gUDF instanceof GenericUDFBridge) { + String udfClassName = ((GenericUDFBridge) gUDF).getUdfClassName(); + if (udfClassName != null) { + int sp = udfClassName.lastIndexOf('.'); + // TODO: add method to UDFBridge to say if it is a cast func + if (sp >= 0 & (sp + 1) < udfClassName.length()) { + udfClassName = udfClassName.substring(sp + 1); + if (udfClassName.equals("UDFToBoolean") || udfClassName.equals("UDFToByte") + || udfClassName.equals("UDFToDouble") || udfClassName.equals("UDFToInteger") + || udfClassName.equals("UDFToLong") || udfClassName.equals("UDFToShort") + || udfClassName.equals("UDFToFloat") || udfClassName.equals("UDFToString")) + castExpr = true; + } + } + } + + return castExpr; + } + + private RexNode handleExplicitCast(ExprNodeGenericFuncDesc func, List childRexNodeLst) { + RexNode castExpr = null; + + if (childRexNodeLst != null && childRexNodeLst.size() == 1) { + GenericUDF udf = func.getGenericUDF(); + if ((udf instanceof GenericUDFToChar) || (udf instanceof GenericUDFToVarchar) + || (udf instanceof GenericUDFToDecimal) || (udf instanceof GenericUDFToDate) + || (udf instanceof GenericUDFToBinary) || castExprUsingUDFBridge(udf)) { + // || (udf instanceof GenericUDFToUnixTimeStamp) || (udf instanceof + // GenericUDFTimestamp) || castExprUsingUDFBridge(udf)) { + castExpr = m_cluster.getRexBuilder().makeCast( + TypeConverter.convert(func.getTypeInfo(), m_cluster.getTypeFactory()), + childRexNodeLst.get(0)); + } + } + + return castExpr; + } + + private InputCtx getInputCtx(ExprNodeColumnDesc col) throws SemanticException { + InputCtx ctxLookingFor = null; + + if (m_inputCtxs.size() == 1) { + ctxLookingFor = m_inputCtxs.get(0); + } else { + String tableAlias = col.getTabAlias(); + String colAlias = col.getColumn(); + int noInp = 0; + for (InputCtx ic : m_inputCtxs) { + if (tableAlias == null || ic.m_hiveRR.hasTableAlias(tableAlias)) { + if (ic.m_hiveRR.getPosition(colAlias) >= 0) { + ctxLookingFor = ic; + noInp++; + } + } + } + + if (noInp > 1) + throw new RuntimeException("Ambigous column mapping"); + } + + return ctxLookingFor; + } + + protected RexNode convert(ExprNodeColumnDesc col) throws SemanticException { + InputCtx ic = getInputCtx(col); + int pos = ic.m_hiveNameToPosMap.get(col.getColumn()); + return m_cluster.getRexBuilder().makeInputRef( + ic.m_optiqInpDataType.getFieldList().get(pos).getType(), pos + ic.m_offsetInOptiqSchema); + } + + protected RexNode convert(ExprNodeConstantDesc literal) { + RexBuilder rexBuilder = m_cluster.getRexBuilder(); + RelDataTypeFactory dtFactory = rexBuilder.getTypeFactory(); + PrimitiveTypeInfo hiveType = (PrimitiveTypeInfo) literal.getTypeInfo(); + RelDataType optiqDataType = TypeConverter.convert(hiveType, 
+        dtFactory);
+
+    PrimitiveCategory hiveTypeCategory = hiveType.getPrimitiveCategory();
+
+    ConstantObjectInspector coi = literal.getWritableObjectInspector();
+    Object value = ObjectInspectorUtils.copyToStandardJavaObject(literal
+        .getWritableObjectInspector().getWritableConstantValue(), coi);
+
+    RexNode optiqLiteral = null;
+    // TODO: Verify if we need to use ConstantObjectInspector to unwrap data
+    switch (hiveTypeCategory) {
+    case BOOLEAN:
+      optiqLiteral = rexBuilder.makeLiteral(((Boolean) value).booleanValue());
+      break;
+    case BYTE:
+      optiqLiteral = rexBuilder.makeExactLiteral(new BigDecimal((Byte) value));
+      break;
+    case SHORT:
+      optiqLiteral = rexBuilder.makeExactLiteral(new BigDecimal((Short) value));
+      break;
+    case INT:
+      optiqLiteral = rexBuilder.makeExactLiteral(new BigDecimal((Integer) value));
+      break;
+    case LONG:
+      optiqLiteral = rexBuilder.makeBigintLiteral(new BigDecimal((Long) value));
+      break;
+    // TODO: is Decimal an exact numeric or approximate numeric?
+    case DECIMAL:
+      if (value instanceof HiveDecimal)
+        value = ((HiveDecimal) value).bigDecimalValue();
+      if (value instanceof Decimal128)
+        value = ((Decimal128) value).toBigDecimal();
+      optiqLiteral = rexBuilder.makeExactLiteral((BigDecimal) value);
+      break;
+    case FLOAT:
+      optiqLiteral = rexBuilder.makeApproxLiteral(new BigDecimal((Float) value), optiqDataType);
+      break;
+    case DOUBLE:
+      optiqLiteral = rexBuilder.makeApproxLiteral(new BigDecimal((Double) value), optiqDataType);
+      break;
+    case CHAR:
+      if (value instanceof HiveChar)
+        value = ((HiveChar) value).getValue();
+      optiqLiteral = rexBuilder.makeLiteral((String) value);
+      break;
+    case VARCHAR:
+      if (value instanceof HiveVarchar)
+        value = ((HiveVarchar) value).getValue();
+      optiqLiteral = rexBuilder.makeLiteral((String) value);
+      break;
+    case STRING:
+      optiqLiteral = rexBuilder.makeLiteral((String) value);
+      break;
+    case DATE:
+      Calendar cal = new GregorianCalendar();
+      cal.setTime((Date) value);
+      optiqLiteral = rexBuilder.makeDateLiteral(cal);
+      break;
+    case TIMESTAMP:
+      // The constant value arrives as a java.sql.Timestamp, not a Calendar;
+      // convert it before building the literal.
+      Calendar tsCal = Calendar.getInstance();
+      tsCal.setTimeInMillis(((java.sql.Timestamp) value).getTime());
+      optiqLiteral = rexBuilder.makeTimestampLiteral(tsCal,
+          RelDataType.PRECISION_NOT_SPECIFIED);
+      break;
+    case BINARY:
+    case VOID:
+    case UNKNOWN:
+    default:
+      throw new RuntimeException("Unsupported literal type " + hiveTypeCategory);
+    }
+
+    return optiqLiteral;
+  }
+
+  public static RexNode convert(RelOptCluster cluster, ExprNodeDesc joinCondnExprNode,
+      List inputRels, LinkedHashMap relToHiveRR,
+      Map relToHiveColNameOptiqPosMap, boolean flattenExpr)
+      throws SemanticException {
+    List inputCtxLst = new ArrayList();
+
+    int offSet = 0;
+    for (RelNode r : inputRels) {
+      inputCtxLst.add(new InputCtx(r.getRowType(), relToHiveColNameOptiqPosMap.get(r), relToHiveRR
+          .get(r), offSet));
+      offSet += r.getRowType().getFieldCount();
+    }
+
+    return (new RexNodeConverter(cluster, inputCtxLst, flattenExpr)).convert(joinCondnExprNode);
+  }
+}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/SqlFunctionConverter.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/SqlFunctionConverter.java
new file mode 100644
index 0000000..c69f7ef
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/SqlFunctionConverter.java
@@ -0,0 +1,312 @@
+package org.apache.hadoop.hive.ql.optimizer.optiq.translator;
+
+import java.lang.annotation.Annotation;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.hadoop.hive.ql.exec.Description;
+import org.apache.hadoop.hive.ql.exec.FunctionInfo;
+import org.apache.hadoop.hive.ql.exec.FunctionRegistry;
+import org.apache.hadoop.hive.ql.parse.ASTNode;
+import org.apache.hadoop.hive.ql.parse.HiveParser;
+import org.apache.hadoop.hive.ql.parse.ParseDriver;
+import org.apache.hadoop.hive.ql.udf.generic.GenericUDF;
+import org.apache.hadoop.hive.ql.udf.generic.GenericUDFBridge;
+import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPNegative;
+import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPPositive;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
+import org.eigenbase.reltype.RelDataType;
+import org.eigenbase.reltype.RelDataTypeFactory;
+import org.eigenbase.sql.SqlAggFunction;
+import org.eigenbase.sql.SqlFunction;
+import org.eigenbase.sql.SqlFunctionCategory;
+import org.eigenbase.sql.SqlKind;
+import org.eigenbase.sql.SqlOperator;
+import org.eigenbase.sql.fun.SqlStdOperatorTable;
+import org.eigenbase.sql.type.InferTypes;
+import org.eigenbase.sql.type.OperandTypes;
+import org.eigenbase.sql.type.ReturnTypes;
+import org.eigenbase.sql.type.SqlOperandTypeChecker;
+import org.eigenbase.sql.type.SqlOperandTypeInference;
+import org.eigenbase.sql.type.SqlReturnTypeInference;
+import org.eigenbase.sql.type.SqlTypeFamily;
+import org.eigenbase.util.Util;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.Maps;
+
+public class SqlFunctionConverter {
+  static final Map hiveToOptiq;
+  static final Map optiqToHiveToken;
+  static final Map reverseOperatorMap;
+
+  static {
+    Builder builder = new Builder();
+    hiveToOptiq = ImmutableMap.copyOf(builder.hiveToOptiq);
+    optiqToHiveToken = ImmutableMap.copyOf(builder.optiqToHiveToken);
+    reverseOperatorMap = ImmutableMap.copyOf(builder.reverseOperatorMap);
+  }
+
+  public static SqlOperator getOptiqOperator(GenericUDF hiveUDF,
+      ImmutableList optiqArgTypes, RelDataType retType) {
+    // handle overloaded methods first
+    if (hiveUDF instanceof GenericUDFOPNegative) {
+      return SqlStdOperatorTable.UNARY_MINUS;
+    } else if (hiveUDF instanceof GenericUDFOPPositive) {
+      return SqlStdOperatorTable.UNARY_PLUS;
+    } // do generic lookup
+    return getOptiqFn(getName(hiveUDF), optiqArgTypes, retType);
+  }
+
+  public static GenericUDF getHiveUDF(SqlOperator op, RelDataType dt) {
+    String name = reverseOperatorMap.get(op);
+    if (name == null)
+      name = op.getName();
+    FunctionInfo hFn = name != null ? FunctionRegistry.getFunctionInfo(name) : null;
+    if (hFn == null)
+      hFn = handleExplicitCast(op, dt);
+    return hFn == null ? null : hFn.getGenericUDF();
+  }
+
+  private static FunctionInfo handleExplicitCast(SqlOperator op, RelDataType dt) {
+    FunctionInfo castUDF = null;
+
+    if (op.kind == SqlKind.CAST) {
+      TypeInfo castType = TypeConverter.convert(dt);
+
+      if (castType.equals(TypeInfoFactory.byteTypeInfo)) {
+        castUDF = FunctionRegistry.getFunctionInfo("tinyint");
+      } else if (castType.equals(TypeInfoFactory.charTypeInfo)) {
+        castUDF = FunctionRegistry.getFunctionInfo("char");
+      } else if (castType.equals(TypeInfoFactory.varcharTypeInfo)) {
+        castUDF = FunctionRegistry.getFunctionInfo("varchar");
+      } else if (castType.equals(TypeInfoFactory.stringTypeInfo)) {
+        castUDF = FunctionRegistry.getFunctionInfo("string");
+      } else if (castType.equals(TypeInfoFactory.booleanTypeInfo)) {
+        castUDF = FunctionRegistry.getFunctionInfo("boolean");
+      } else if (castType.equals(TypeInfoFactory.shortTypeInfo)) {
+        castUDF = FunctionRegistry.getFunctionInfo("smallint");
+      } else if (castType.equals(TypeInfoFactory.intTypeInfo)) {
+        castUDF = FunctionRegistry.getFunctionInfo("int");
+      } else if (castType.equals(TypeInfoFactory.longTypeInfo)) {
+        castUDF = FunctionRegistry.getFunctionInfo("bigint");
+      } else if (castType.equals(TypeInfoFactory.floatTypeInfo)) {
+        castUDF = FunctionRegistry.getFunctionInfo("float");
+      } else if (castType.equals(TypeInfoFactory.doubleTypeInfo)) {
+        castUDF = FunctionRegistry.getFunctionInfo("double");
+      } else if (castType.equals(TypeInfoFactory.timestampTypeInfo)) {
+        castUDF = FunctionRegistry.getFunctionInfo("timestamp");
+      } else if (castType.equals(TypeInfoFactory.dateTypeInfo)) {
+        castUDF = FunctionRegistry.getFunctionInfo("date");
+      } else if (castType.equals(TypeInfoFactory.decimalTypeInfo)) {
+        castUDF = FunctionRegistry.getFunctionInfo("decimal");
+      } else if (castType.equals(TypeInfoFactory.binaryTypeInfo)) {
+        castUDF = FunctionRegistry.getFunctionInfo("binary");
+      }
+    }
+
+    return castUDF;
+  }
+
+  // TODO: 1) handle Agg Func Name translation 2) is it correct to add func args
+  // as child of func?
+  public static ASTNode buildAST(SqlOperator op, List children) {
+    HiveToken hToken = optiqToHiveToken.get(op);
+    ASTNode node;
+    if (hToken != null) {
+      node = (ASTNode) ParseDriver.adaptor.create(hToken.type, hToken.text);
+    } else {
+      node = (ASTNode) ParseDriver.adaptor.create(HiveParser.TOK_FUNCTION, "TOK_FUNCTION");
+      if (op.kind != SqlKind.CAST) {
+        if (op.kind == SqlKind.MINUS_PREFIX) {
+          node = (ASTNode) ParseDriver.adaptor.create(HiveParser.MINUS, "MINUS");
+        } else if (op.kind == SqlKind.PLUS_PREFIX) {
+          node = (ASTNode) ParseDriver.adaptor.create(HiveParser.PLUS, "PLUS");
+        } else {
+          node.addChild((ASTNode) ParseDriver.adaptor.create(HiveParser.Identifier, op.getName()));
+        }
+      }
+    }
+
+    for (ASTNode c : children) {
+      ParseDriver.adaptor.addChild(node, c);
+    }
+    return node;
+  }
+
+  /**
+   * Build an AST for flattened associative expressions ('and', 'or'). A
+   * flattened expression is of the form or[x, y, z], which is originally
+   * represented as "or[x, or[y, z]]".
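+   * For example, given the flattened operands [x, y, z], the recursive
+   * overload below rebuilds the right-nested binary form:
+   * <pre>
+   *   buildAST(or, [x, y, z], 0)
+   *     = or(x, buildAST(or, [x, y, z], 1))
+   *     = or(x, or(y, z))
+   * </pre>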
+   */
+  public static ASTNode buildAST(SqlOperator op, List children, int i) {
+    if (i + 1 < children.size()) {
+      HiveToken hToken = optiqToHiveToken.get(op);
+      ASTNode curNode = ((ASTNode) ParseDriver.adaptor.create(hToken.type, hToken.text));
+      ParseDriver.adaptor.addChild(curNode, children.get(i));
+      ParseDriver.adaptor.addChild(curNode, buildAST(op, children, i + 1));
+      return curNode;
+    } else {
+      return children.get(i);
+    }
+  }
+
+  private static String getName(GenericUDF hiveUDF) {
+    String udfName = null;
+    if (hiveUDF instanceof GenericUDFBridge) {
+      udfName = ((GenericUDFBridge) hiveUDF).getUdfName();
+    } else {
+      Class udfClass = hiveUDF.getClass();
+      Annotation udfAnnotation = udfClass.getAnnotation(Description.class);
+
+      if (udfAnnotation != null && udfAnnotation instanceof Description) {
+        Description udfDescription = (Description) udfAnnotation;
+        udfName = udfDescription.name();
+      }
+
+      if (udfName == null || udfName.isEmpty()) {
+        udfName = hiveUDF.getClass().getName();
+        int indx = udfName.lastIndexOf(".");
+        if (indx >= 0) {
+          indx += 1;
+          udfName = udfName.substring(indx);
+        }
+      }
+    }
+
+    return udfName;
+  }
+
+  private static class Builder {
+    final Map hiveToOptiq = Maps.newHashMap();
+    final Map optiqToHiveToken = Maps.newHashMap();
+    final Map reverseOperatorMap = Maps.newHashMap();
+
+    Builder() {
+      registerFunction("+", SqlStdOperatorTable.PLUS, hToken(HiveParser.PLUS, "+"));
+      registerFunction("-", SqlStdOperatorTable.MINUS, hToken(HiveParser.MINUS, "-"));
+      registerFunction("*", SqlStdOperatorTable.MULTIPLY, hToken(HiveParser.STAR, "*"));
+      registerFunction("/", SqlStdOperatorTable.DIVIDE, hToken(HiveParser.DIVIDE, "/"));
+      registerFunction("%", SqlStdOperatorTable.MOD, hToken(HiveParser.MOD, "%"));
+      registerFunction("and", SqlStdOperatorTable.AND, hToken(HiveParser.KW_AND, "and"));
+      registerFunction("or", SqlStdOperatorTable.OR, hToken(HiveParser.KW_OR, "or"));
+      registerFunction("=", SqlStdOperatorTable.EQUALS, hToken(HiveParser.EQUAL, "="));
+      registerFunction("<", SqlStdOperatorTable.LESS_THAN, hToken(HiveParser.LESSTHAN, "<"));
+      registerFunction("<=", SqlStdOperatorTable.LESS_THAN_OR_EQUAL,
+          hToken(HiveParser.LESSTHANOREQUALTO, "<="));
+      registerFunction(">", SqlStdOperatorTable.GREATER_THAN, hToken(HiveParser.GREATERTHAN, ">"));
+      registerFunction(">=", SqlStdOperatorTable.GREATER_THAN_OR_EQUAL,
+          hToken(HiveParser.GREATERTHANOREQUALTO, ">="));
+      registerFunction("!", SqlStdOperatorTable.NOT, hToken(HiveParser.KW_NOT, "not"));
+    }
+
+    private void registerFunction(String name, SqlOperator optiqFn, HiveToken hiveToken) {
+      reverseOperatorMap.put(optiqFn, name);
+      FunctionInfo hFn = FunctionRegistry.getFunctionInfo(name);
+      if (hFn != null) {
+        String hFnName = getName(hFn.getGenericUDF());
+        hiveToOptiq.put(hFnName, optiqFn);
+
+        if (hiveToken != null) {
+          optiqToHiveToken.put(optiqFn, hiveToken);
+        }
+      }
+    }
+  }
+
+  private static HiveToken hToken(int type, String text) {
+    return new HiveToken(type, text);
+  }
+
+  public static class OptiqUDAF extends SqlAggFunction {
+    final ImmutableList m_argTypes;
+    final RelDataType m_retType;
+
+    public OptiqUDAF(String opName, SqlReturnTypeInference returnTypeInference,
+        SqlOperandTypeInference operandTypeInference, SqlOperandTypeChecker operandTypeChecker,
+        ImmutableList argTypes, RelDataType retType) {
+      super(opName, SqlKind.OTHER_FUNCTION, returnTypeInference, operandTypeInference,
+          operandTypeChecker, SqlFunctionCategory.USER_DEFINED_FUNCTION);
+      m_argTypes = argTypes;
+      m_retType = retType;
+    }
+
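+    // The argument and return types below were captured from the Hive UDAF at
+    // translation time and are handed back verbatim, so the planner performs
+    // no type inference of its own for user-defined aggregates.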
@Override + public List getParameterTypes(final RelDataTypeFactory typeFactory) { + return m_argTypes; + } + + @Override + public RelDataType getReturnType(final RelDataTypeFactory typeFactory) { + return m_retType; + } + } + + private static class OptiqUDFInfo { + private String m_udfName; + private SqlReturnTypeInference m_returnTypeInference; + private SqlOperandTypeInference m_operandTypeInference; + private SqlOperandTypeChecker m_operandTypeChecker; + private ImmutableList m_argTypes; + private RelDataType m_retType; + } + + private static OptiqUDFInfo getUDFInfo(String hiveUdfName, + ImmutableList optiqArgTypes, RelDataType optiqRetType) { + OptiqUDFInfo udfInfo = new OptiqUDFInfo(); + udfInfo.m_udfName = hiveUdfName; + udfInfo.m_returnTypeInference = ReturnTypes.explicit(optiqRetType); + udfInfo.m_operandTypeInference = InferTypes.explicit(optiqArgTypes); + ImmutableList.Builder typeFamilyBuilder = new ImmutableList.Builder(); + for (RelDataType at : optiqArgTypes) { + typeFamilyBuilder.add(Util.first(at.getSqlTypeName().getFamily(), SqlTypeFamily.ANY)); + } + udfInfo.m_operandTypeChecker = OperandTypes.family(typeFamilyBuilder.build()); + + udfInfo.m_argTypes = ImmutableList. copyOf(optiqArgTypes); + udfInfo.m_retType = optiqRetType; + + return udfInfo; + } + + public static SqlOperator getOptiqFn(String hiveUdfName, + ImmutableList optiqArgTypes, RelDataType optiqRetType) { + SqlOperator optiqOp = hiveToOptiq.get(hiveUdfName); + if (optiqOp == null) { + OptiqUDFInfo uInf = getUDFInfo(hiveUdfName, optiqArgTypes, optiqRetType); + optiqOp = new SqlFunction(uInf.m_udfName, SqlKind.OTHER_FUNCTION, uInf.m_returnTypeInference, + uInf.m_operandTypeInference, uInf.m_operandTypeChecker, + SqlFunctionCategory.USER_DEFINED_FUNCTION); + } + + return optiqOp; + } + + public static SqlAggFunction getOptiqAggFn(String hiveUdfName, + ImmutableList optiqArgTypes, RelDataType optiqRetType) { + SqlAggFunction optiqAggFn = (SqlAggFunction) hiveToOptiq.get(hiveUdfName); + if (optiqAggFn == null) { + OptiqUDFInfo uInf = getUDFInfo(hiveUdfName, optiqArgTypes, optiqRetType); + + optiqAggFn = new OptiqUDAF(uInf.m_udfName, uInf.m_returnTypeInference, + uInf.m_operandTypeInference, uInf.m_operandTypeChecker, uInf.m_argTypes, uInf.m_retType); + } + + return optiqAggFn; + } + + static class HiveToken { + int type; + String text; + String[] args; + + HiveToken(int type, String text, String... 
args) { + this.type = type; + this.text = text; + this.args = args; + } + } +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/TypeConverter.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/TypeConverter.java new file mode 100644 index 0000000..755b5ce --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/TypeConverter.java @@ -0,0 +1,305 @@ +package org.apache.hadoop.hive.ql.optimizer.optiq.translator; + +import java.util.LinkedList; +import java.util.List; +import java.util.Map; + +import org.apache.hadoop.hive.ql.exec.ColumnInfo; +import org.apache.hadoop.hive.ql.exec.RowSchema; +import org.apache.hadoop.hive.ql.optimizer.optiq.translator.SqlFunctionConverter.HiveToken; +import org.apache.hadoop.hive.ql.parse.HiveParser; +import org.apache.hadoop.hive.ql.parse.RowResolver; +import org.apache.hadoop.hive.serde.serdeConstants; +import org.apache.hadoop.hive.serde2.typeinfo.BaseCharTypeInfo; +import org.apache.hadoop.hive.serde2.typeinfo.DecimalTypeInfo; +import org.apache.hadoop.hive.serde2.typeinfo.ListTypeInfo; +import org.apache.hadoop.hive.serde2.typeinfo.MapTypeInfo; +import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo; +import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo; +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory; +import org.apache.hadoop.hive.serde2.typeinfo.UnionTypeInfo; +import org.eigenbase.relopt.RelOptCluster; +import org.eigenbase.reltype.RelDataType; +import org.eigenbase.reltype.RelDataTypeFactory; +import org.eigenbase.reltype.RelDataTypeFactoryImpl.JavaType; +import org.eigenbase.reltype.RelDataTypeField; +import org.eigenbase.rex.RexBuilder; +import org.eigenbase.sql.type.SqlTypeName; + +import com.google.common.base.Function; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.ImmutableMap.Builder; +import com.google.common.collect.Lists; + +public class TypeConverter { + private static final Map m_optiqToHiveTypeNameMap; + + // TODO: Handling of char[], varchar[], string... + static { + Builder b = ImmutableMap. 
builder(); + b.put(SqlTypeName.BOOLEAN.getName(), new HiveToken(HiveParser.TOK_BOOLEAN, "TOK_BOOLEAN")); + b.put(SqlTypeName.TINYINT.getName(), new HiveToken(HiveParser.TOK_TINYINT, "TOK_TINYINT")); + b.put(SqlTypeName.SMALLINT.getName(), new HiveToken(HiveParser.TOK_SMALLINT, "TOK_SMALLINT")); + b.put(SqlTypeName.INTEGER.getName(), new HiveToken(HiveParser.TOK_INT, "TOK_INT")); + b.put(SqlTypeName.BIGINT.getName(), new HiveToken(HiveParser.TOK_BIGINT, "TOK_BIGINT")); + b.put(SqlTypeName.FLOAT.getName(), new HiveToken(HiveParser.TOK_FLOAT, "TOK_FLOAT")); + b.put(SqlTypeName.DOUBLE.getName(), new HiveToken(HiveParser.TOK_DOUBLE, "TOK_DOUBLE")); + b.put(SqlTypeName.DATE.getName(), new HiveToken(HiveParser.TOK_DATE, "TOK_DATE")); + b.put(SqlTypeName.TIMESTAMP.getName(), new HiveToken(HiveParser.TOK_TIMESTAMP, "TOK_TIMESTAMP")); + b.put(SqlTypeName.BINARY.getName(), new HiveToken(HiveParser.TOK_BINARY, "TOK_BINARY")); + m_optiqToHiveTypeNameMap = b.build(); + }; + + /*********************** Convert Hive Types To Optiq Types ***********************/ + public static RelDataType getType(RelOptCluster cluster, List cInfoLst) { + RexBuilder rexBuilder = cluster.getRexBuilder(); + RelDataTypeFactory dtFactory = rexBuilder.getTypeFactory(); + List fieldTypes = new LinkedList(); + List fieldNames = new LinkedList(); + + for (ColumnInfo ci : cInfoLst) { + fieldTypes.add(convert(ci.getType(), dtFactory)); + fieldNames.add(ci.getInternalName()); + } + return dtFactory.createStructType(fieldTypes, fieldNames); + } + + public static RelDataType getType(RelOptCluster cluster, RowResolver rr, List neededCols) { + RexBuilder rexBuilder = cluster.getRexBuilder(); + RelDataTypeFactory dtFactory = rexBuilder.getTypeFactory(); + RowSchema rs = rr.getRowSchema(); + List fieldTypes = new LinkedList(); + List fieldNames = new LinkedList(); + + for (ColumnInfo ci : rs.getSignature()) { + if (neededCols == null || neededCols.contains(ci.getInternalName())) { + fieldTypes.add(convert(ci.getType(), dtFactory)); + fieldNames.add(ci.getInternalName()); + } + } + return dtFactory.createStructType(fieldTypes, fieldNames); + } + + public static RelDataType convert(TypeInfo type, RelDataTypeFactory dtFactory) { + RelDataType convertedType = null; + + switch (type.getCategory()) { + case PRIMITIVE: + convertedType = convert((PrimitiveTypeInfo) type, dtFactory); + break; + case LIST: + convertedType = convert((ListTypeInfo) type, dtFactory); + break; + case MAP: + convertedType = convert((MapTypeInfo) type, dtFactory); + break; + case STRUCT: + convertedType = convert((StructTypeInfo) type, dtFactory); + break; + case UNION: + convertedType = convert((UnionTypeInfo) type, dtFactory); + break; + } + return convertedType; + } + + public static RelDataType convert(PrimitiveTypeInfo type, RelDataTypeFactory dtFactory) { + RelDataType convertedType = null; + + switch (type.getPrimitiveCategory()) { + case VOID: + convertedType = dtFactory.createSqlType(SqlTypeName.NULL); + break; + case BOOLEAN: + convertedType = dtFactory.createSqlType(SqlTypeName.BOOLEAN); + break; + case BYTE: + convertedType = dtFactory.createSqlType(SqlTypeName.TINYINT); + break; + case SHORT: + convertedType = dtFactory.createSqlType(SqlTypeName.SMALLINT); + break; + case INT: + convertedType = dtFactory.createSqlType(SqlTypeName.INTEGER); + break; + case LONG: + convertedType = dtFactory.createSqlType(SqlTypeName.BIGINT); + break; + case FLOAT: + convertedType = dtFactory.createSqlType(SqlTypeName.FLOAT); + break; + case DOUBLE: + convertedType = 
dtFactory.createSqlType(SqlTypeName.DOUBLE);
+      break;
+    case STRING:
+      convertedType = dtFactory.createSqlType(SqlTypeName.VARCHAR, Integer.MAX_VALUE);
+      break;
+    case DATE:
+      convertedType = dtFactory.createSqlType(SqlTypeName.DATE);
+      break;
+    case TIMESTAMP:
+      convertedType = dtFactory.createSqlType(SqlTypeName.TIMESTAMP);
+      break;
+    case BINARY:
+      convertedType = dtFactory.createSqlType(SqlTypeName.BINARY);
+      break;
+    case DECIMAL:
+      DecimalTypeInfo dtInf = (DecimalTypeInfo) type;
+      convertedType = dtFactory
+          .createSqlType(SqlTypeName.DECIMAL, dtInf.precision(), dtInf.scale());
+      break;
+    case VARCHAR:
+      convertedType = dtFactory.createSqlType(SqlTypeName.VARCHAR,
+          ((BaseCharTypeInfo) type).getLength());
+      break;
+    case CHAR:
+      convertedType = dtFactory.createSqlType(SqlTypeName.CHAR,
+          ((BaseCharTypeInfo) type).getLength());
+      break;
+    case UNKNOWN:
+      convertedType = dtFactory.createSqlType(SqlTypeName.OTHER);
+      break;
+    }
+
+    if (null == convertedType) {
+      throw new RuntimeException("Unsupported type: " + type.getTypeName());
+    }
+
+    return convertedType;
+  }
+
+  public static RelDataType convert(ListTypeInfo lstType, RelDataTypeFactory dtFactory) {
+    RelDataType elemType = convert(lstType.getListElementTypeInfo(), dtFactory);
+    return dtFactory.createArrayType(elemType, -1);
+  }
+
+  public static RelDataType convert(MapTypeInfo mapType, RelDataTypeFactory dtFactory) {
+    RelDataType keyType = convert(mapType.getMapKeyTypeInfo(), dtFactory);
+    RelDataType valueType = convert(mapType.getMapValueTypeInfo(), dtFactory);
+    return dtFactory.createMapType(keyType, valueType);
+  }
+
+  public static RelDataType convert(StructTypeInfo structType, final RelDataTypeFactory dtFactory) {
+    List fTypes = Lists.transform(structType.getAllStructFieldTypeInfos(),
+        new Function() {
+          @Override
+          public RelDataType apply(TypeInfo tI) {
+            return convert(tI, dtFactory);
+          }
+        });
+    return dtFactory.createStructType(fTypes, structType.getAllStructFieldNames());
+  }
+
+  public static RelDataType convert(UnionTypeInfo unionType, RelDataTypeFactory dtFactory) {
+    // TODO: what do we do about unions?
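+    // Hive's uniontype has no Optiq counterpart, so translation bails out
+    // here; the resulting exception is presumably what sends such queries
+    // back to the non-CBO planning path.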
+    throw new UnsupportedOperationException();
+  }
+
+  public static TypeInfo convert(RelDataType rType) {
+    if (rType.isStruct()) {
+      return convertStructType(rType);
+    } else if (rType.getComponentType() != null) {
+      return convertListType(rType);
+    } else if (rType.getKeyType() != null) {
+      return convertMapType(rType);
+    } else {
+      return convertPrimitiveType(rType);
+    }
+  }
+
+  public static TypeInfo convertStructType(RelDataType rType) {
+    List fTypes = Lists.transform(rType.getFieldList(),
+        new Function() {
+          @Override
+          public TypeInfo apply(RelDataTypeField f) {
+            return convert(f.getType());
+          }
+        });
+    List fNames = Lists.transform(rType.getFieldList(),
+        new Function() {
+          @Override
+          public String apply(RelDataTypeField f) {
+            return f.getName();
+          }
+        });
+    return TypeInfoFactory.getStructTypeInfo(fNames, fTypes);
+  }
+
+  public static TypeInfo convertMapType(RelDataType rType) {
+    return TypeInfoFactory.getMapTypeInfo(convert(rType.getKeyType()),
+        convert(rType.getValueType()));
+  }
+
+  public static TypeInfo convertListType(RelDataType rType) {
+    return TypeInfoFactory.getListTypeInfo(convert(rType.getComponentType()));
+  }
+
+  public static TypeInfo convertPrimitiveType(RelDataType rType) {
+    switch (rType.getSqlTypeName()) {
+    case BOOLEAN:
+      return TypeInfoFactory.booleanTypeInfo;
+    case TINYINT:
+      return TypeInfoFactory.byteTypeInfo;
+    case SMALLINT:
+      return TypeInfoFactory.shortTypeInfo;
+    case INTEGER:
+      return TypeInfoFactory.intTypeInfo;
+    case BIGINT:
+      return TypeInfoFactory.longTypeInfo;
+    case FLOAT:
+      return TypeInfoFactory.floatTypeInfo;
+    case DOUBLE:
+      return TypeInfoFactory.doubleTypeInfo;
+    case DATE:
+      return TypeInfoFactory.dateTypeInfo;
+    case TIMESTAMP:
+      return TypeInfoFactory.timestampTypeInfo;
+    case BINARY:
+      return TypeInfoFactory.binaryTypeInfo;
+    case DECIMAL:
+      return TypeInfoFactory.getDecimalTypeInfo(rType.getPrecision(), rType.getScale());
+    case VARCHAR:
+      if (rType.getPrecision() == Integer.MAX_VALUE)
+        return TypeInfoFactory.getPrimitiveTypeInfo(serdeConstants.STRING_TYPE_NAME);
+      else
+        return TypeInfoFactory.getVarcharTypeInfo(rType.getPrecision());
+    case CHAR:
+      return TypeInfoFactory.getCharTypeInfo(rType.getPrecision());
+    case OTHER:
+    default:
+      return TypeInfoFactory.voidTypeInfo;
+    }
+  }
+
+  /*********************** Convert Optiq Types To Hive Types ***********************/
+  public static HiveToken hiveToken(RelDataType optiqType) {
+    HiveToken ht = null;
+
+    switch (optiqType.getSqlTypeName()) {
+    case CHAR: {
+      ht = new HiveToken(HiveParser.TOK_CHAR, "TOK_CHAR", String.valueOf(optiqType.getPrecision()));
+    }
+      break;
+    case VARCHAR: {
+      if (optiqType.getPrecision() == Integer.MAX_VALUE)
+        ht = new HiveToken(HiveParser.TOK_STRING, "TOK_STRING", String.valueOf(optiqType
+            .getPrecision()));
+      else
+        ht = new HiveToken(HiveParser.TOK_VARCHAR, "TOK_VARCHAR", String.valueOf(optiqType
+            .getPrecision()));
+    }
+      break;
+    case DECIMAL: {
+      ht = new HiveToken(HiveParser.TOK_DECIMAL, "TOK_DECIMAL", String.valueOf(optiqType
+          .getPrecision()), String.valueOf(optiqType.getScale()));
+    }
+      break;
+    default:
+      ht = m_optiqToHiveTypeNameMap.get(optiqType.getSqlTypeName().getName());
+    }
+
+    return ht;
+  }
+}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartitionPruner.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartitionPruner.java
index c6669af..8df86bb 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartitionPruner.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartitionPruner.java
@@ -57,6 +57,7 @@
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPAnd;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPOr;
 import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
 
 /**
  * The transformation step that does partition pruning.
@@ -155,27 +156,81 @@ public static PrunedPartitionList prune(TableScanOperator ts, ParseContext parse
    * pruner condition.
    * @throws HiveException
    */
-  private static PrunedPartitionList prune(Table tab, ExprNodeDesc prunerExpr,
+  public static PrunedPartitionList prune(Table tab, ExprNodeDesc prunerExpr,
       HiveConf conf, String alias, Map prunedPartitionsMap)
           throws SemanticException {
+    LOG.trace("Started pruning partition");
     LOG.trace("dbname = " + tab.getDbName());
     LOG.trace("tabname = " + tab.getTableName());
-    LOG.trace("prune Expression = " + prunerExpr);
+    LOG.trace("prune Expression = " + (prunerExpr == null ? "" : prunerExpr));
+
+    if ("strict".equalsIgnoreCase(HiveConf.getVar(conf, HiveConf.ConfVars.HIVEMAPREDMODE))
+        && !hasColumnExpr(prunerExpr)) {
+      // If the "strict" mode is on, we have to provide a partition pruner for each table.
+      throw new SemanticException(ErrorMsg.NO_PARTITION_PREDICATE
+          .getMsg("for Alias \"" + alias + "\" Table \"" + tab.getTableName() + "\""));
+    }
 
     String key = tab.getDbName() + "." + tab.getTableName() + ";";
 
-    if (prunerExpr != null) {
-      key = key + prunerExpr.getExprString();
+    if (!tab.isPartitioned() || prunerExpr == null) {
+      // If the table is not partitioned, return everything.
+      // Or, non-strict mode, and there are no predicates at all - get everything.
+      return getAllPartsFromCacheOrServer(tab, key, false, prunedPartitionsMap);
+    }
+
+    Set partColsUsedInFilter = new LinkedHashSet();
+    // Replace virtual columns with nulls. See javadoc for details.
+    prunerExpr = removeNonPartCols(prunerExpr, extractPartColNames(tab), partColsUsedInFilter);
+    // Remove all parts that are not partition columns. See javadoc for details.
+    ExprNodeGenericFuncDesc compactExpr = (ExprNodeGenericFuncDesc)compactExpr(prunerExpr.clone());
+    String oldFilter = prunerExpr.getExprString();
+    if (compactExpr == null) {
+      // Non-strict mode, and all the predicates are on non-partition columns - get everything.
+      LOG.debug("Filter " + oldFilter + " was null after compacting");
+      return getAllPartsFromCacheOrServer(tab, key, true, prunedPartitionsMap);
+    }
+    LOG.debug("Filter w/ compacting: " + compactExpr.getExprString()
+        + "; filter w/o compacting: " + oldFilter);
+
+    // The cache key is the qualified table name plus the compacted filter
+    // string, e.g. "default.page_view;((ds = '2008-04-08'))".
+    key = key + compactExpr.getExprString();
+    PrunedPartitionList ppList = prunedPartitionsMap.get(key);
+    if (ppList != null) {
+      return ppList;
     }
-    PrunedPartitionList ret = prunedPartitionsMap.get(key);
-    if (ret != null) {
-      return ret;
+
+    ppList = getPartitionsFromServer(tab, compactExpr, conf, alias, partColsUsedInFilter,
+        oldFilter.equals(compactExpr.getExprString()));
+    prunedPartitionsMap.put(key, ppList);
+    return ppList;
+  }
+
+  private static PrunedPartitionList getAllPartsFromCacheOrServer(Table tab, String key,
+      boolean unknownPartitions, Map partsCache) throws SemanticException {
+    PrunedPartitionList ppList = partsCache.get(key);
+    if (ppList != null) {
+      return ppList;
+    }
+    Set parts;
+    try {
+      parts = getAllPartitions(tab);
+    } catch (HiveException e) {
+      throw new SemanticException(e);
     }
+    ppList = new PrunedPartitionList(tab, parts, null, unknownPartitions);
+    partsCache.put(key, ppList);
+    return ppList;
+  }
 
-    ret = getPartitionsFromServer(tab, prunerExpr, conf, alias);
-    prunedPartitionsMap.put(key, ret);
-    return ret;
+  private static ExprNodeDesc removeTruePredicates(ExprNodeDesc e) {
+    if (e instanceof ExprNodeConstantDesc) {
+      ExprNodeConstantDesc eC = (ExprNodeConstantDesc) e;
+      if (e.getTypeInfo() == TypeInfoFactory.booleanTypeInfo
+          && eC.getValue() == Boolean.TRUE) {
+        return null;
+      }
+    }
+    return e;
   }
 
   /**
@@ -187,7 +242,8 @@ private static PrunedPartitionList prune(Table tab, ExprNodeDesc prunerExpr,
    */
   static private ExprNodeDesc compactExpr(ExprNodeDesc expr) {
     if (expr instanceof ExprNodeConstantDesc) {
-      if (((ExprNodeConstantDesc)expr).getValue() == null) {
+      expr = removeTruePredicates(expr);
+      if (expr == null || ((ExprNodeConstantDesc)expr).getValue() == null) {
         return null;
       } else {
         throw new IllegalStateException("Unexpected non-null ExprNodeConstantDesc: "
@@ -198,10 +254,11 @@ static private ExprNodeDesc compactExpr(ExprNodeDesc expr) {
       boolean isAnd = udf instanceof GenericUDFOPAnd;
       if (isAnd || udf instanceof GenericUDFOPOr) {
         List children = expr.getChildren();
-        ExprNodeDesc left = children.get(0);
-        children.set(0, compactExpr(left));
-        ExprNodeDesc right = children.get(1);
-        children.set(1, compactExpr(right));
+        ExprNodeDesc left = removeTruePredicates(children.get(0));
+        children.set(0, left == null ? null : compactExpr(left));
+        ExprNodeDesc right = removeTruePredicates(children.get(1));
+        children.set(1, right == null ? null : compactExpr(right));
+
         // Note that one does not simply compact (not-null or null) to not-null.
         // Only if we have an "and" is it valid to send one side to metastore.
         if (children.get(0) == null && children.get(1) == null) {
@@ -267,40 +324,8 @@ static private boolean hasUserFunctions(ExprNodeDesc expr) {
   }
 
   private static PrunedPartitionList getPartitionsFromServer(Table tab,
-      ExprNodeDesc prunerExpr, HiveConf conf, String alias) throws SemanticException {
+      final ExprNodeGenericFuncDesc compactExpr, HiveConf conf, String alias,
+      Set partColsUsedInFilter, boolean isPruningByExactFilter) throws SemanticException {
     try {
-      if (!tab.isPartitioned()) {
-        // If the table is not partitioned, return everything.
- return new PrunedPartitionList(tab, getAllPartitions(tab), null, false); - } - LOG.debug("tabname = " + tab.getTableName() + " is partitioned"); - - if ("strict".equalsIgnoreCase(HiveConf.getVar(conf, HiveConf.ConfVars.HIVEMAPREDMODE)) - && !hasColumnExpr(prunerExpr)) { - // If the "strict" mode is on, we have to provide partition pruner for each table. - throw new SemanticException(ErrorMsg.NO_PARTITION_PREDICATE - .getMsg("for Alias \"" + alias + "\" Table \"" + tab.getTableName() + "\"")); - } - - if (prunerExpr == null) { - // Non-strict mode, and there is no predicates at all - get everything. - return new PrunedPartitionList(tab, getAllPartitions(tab), null, false); - } - - Set referred = new LinkedHashSet(); - // Replace virtual columns with nulls. See javadoc for details. - prunerExpr = removeNonPartCols(prunerExpr, extractPartColNames(tab), referred); - // Remove all parts that are not partition columns. See javadoc for details. - ExprNodeGenericFuncDesc compactExpr = (ExprNodeGenericFuncDesc)compactExpr(prunerExpr.clone()); - String oldFilter = prunerExpr.getExprString(); - if (compactExpr == null) { - // Non-strict mode, and all the predicates are on non-partition columns - get everything. - LOG.debug("Filter " + oldFilter + " was null after compacting"); - return new PrunedPartitionList(tab, getAllPartitions(tab), null, true); - } - - LOG.debug("Filter w/ compacting: " + compactExpr.getExprString() - + "; filter w/o compacting: " + oldFilter); // Finally, check the filter for non-built-in UDFs. If these are present, we cannot // do filtering on the server, and have to fall back to client path. @@ -330,9 +355,8 @@ private static PrunedPartitionList getPartitionsFromServer(Table tab, // The partitions are "unknown" if the call says so due to the expression // evaluator returning null for a partition, or if we sent a partial expression to // metastore and so some partitions may have no data based on other filters. 
- boolean isPruningByExactFilter = oldFilter.equals(compactExpr.getExprString()); return new PrunedPartitionList(tab, new LinkedHashSet(partitions), - new ArrayList(referred), + new ArrayList(partColsUsedInFilter), hasUnknownPartitions || !isPruningByExactFilter); } catch (SemanticException e) { throw e; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java index b5b2b60..ae5e929 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java @@ -194,7 +194,7 @@ protected static Hive createHiveDB(HiveConf conf) throws SemanticException { } public abstract void analyzeInternal(ASTNode ast) throws SemanticException; - public void init() { + public void init(boolean clearPartsCache) { //no-op } @@ -204,7 +204,7 @@ public void initCtx(Context ctx) { public void analyze(ASTNode ast, Context ctx) throws SemanticException { initCtx(ctx); - init(); + init(true); analyzeInternal(ast); } @@ -231,7 +231,7 @@ public void setFetchTask(FetchTask fetchTask) { this.fetchTask = fetchTask; } - protected void reset() { + protected void reset(boolean clearPartsCache) { rootTasks = new ArrayList>(); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ColumnStatsSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ColumnStatsSemanticAnalyzer.java index 44c193f..abf6f5e 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ColumnStatsSemanticAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ColumnStatsSemanticAnalyzer.java @@ -58,7 +58,7 @@ private Table tbl; public ColumnStatsSemanticAnalyzer(HiveConf conf) throws SemanticException { - super(conf); + super(conf, false); } private boolean shouldRewrite(ASTNode tree) { @@ -347,7 +347,7 @@ private ASTNode genRewrittenTree(String rewrittenQuery) throws SemanticException } public ColumnStatsSemanticAnalyzer(HiveConf conf, ASTNode tree) throws SemanticException { - super(conf); + super(conf, false); // check if it is no scan. grammar prevents coexit noscan/columns super.processNoScanCommand(tree); // check if it is partial scan. grammar prevents coexit partialscan/columns @@ -419,7 +419,7 @@ public void analyze(ASTNode ast, Context origCtx) throws SemanticException { QBParseInfo qbp; // initialize QB - init(); + init(true); // Setup the necessary metadata if originating from analyze rewrite if (isRewritten) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/JoinCondnTypeCheckProcFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/JoinCondnTypeCheckProcFactory.java new file mode 100644 index 0000000..a14d392 --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/JoinCondnTypeCheckProcFactory.java @@ -0,0 +1,1111 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.parse;
+
+import java.sql.Date;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.Stack;
+
+import org.apache.commons.lang.StringUtils;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hive.common.type.HiveDecimal;
+import org.apache.hadoop.hive.ql.ErrorMsg;
+import org.apache.hadoop.hive.ql.exec.ColumnInfo;
+import org.apache.hadoop.hive.ql.exec.FunctionInfo;
+import org.apache.hadoop.hive.ql.exec.FunctionRegistry;
+import org.apache.hadoop.hive.ql.exec.UDFArgumentException;
+import org.apache.hadoop.hive.ql.exec.UDFArgumentLengthException;
+import org.apache.hadoop.hive.ql.exec.UDFArgumentTypeException;
+import org.apache.hadoop.hive.ql.lib.DefaultGraphWalker;
+import org.apache.hadoop.hive.ql.lib.DefaultRuleDispatcher;
+import org.apache.hadoop.hive.ql.lib.Dispatcher;
+import org.apache.hadoop.hive.ql.lib.GraphWalker;
+import org.apache.hadoop.hive.ql.lib.Node;
+import org.apache.hadoop.hive.ql.lib.NodeProcessor;
+import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx;
+import org.apache.hadoop.hive.ql.lib.Rule;
+import org.apache.hadoop.hive.ql.lib.RuleRegExp;
+import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc;
+import org.apache.hadoop.hive.ql.plan.ExprNodeColumnListDesc;
+import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc;
+import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
+import org.apache.hadoop.hive.ql.plan.ExprNodeDescUtils;
+import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc;
+import org.apache.hadoop.hive.ql.plan.ExprNodeNullDesc;
+import org.apache.hadoop.hive.ql.udf.SettableUDF;
+import org.apache.hadoop.hive.ql.udf.generic.GenericUDF;
+import org.apache.hadoop.hive.ql.udf.generic.GenericUDFBaseCompare;
+import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPAnd;
+import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPEqual;
+import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPOr;
+import org.apache.hadoop.hive.serde.serdeConstants;
+import org.apache.hadoop.hive.serde2.typeinfo.CharTypeInfo;
+import org.apache.hadoop.hive.serde2.typeinfo.DecimalTypeInfo;
+import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
+import org.apache.hadoop.hive.serde2.typeinfo.VarcharTypeInfo;
+
+/**
+ * The factory for creating typecheck processors. The typecheck processors are
+ * used to process the syntax trees for join condition expressions and convert
+ * them into expression node descriptor trees. They also introduce the correct
+ * conversion functions to do proper implicit conversion.
+ */
+
+/**
+ * TODO:
+ * 1. Migrate common code into TypeCheckProcFactory.
+ * 2. Verify whether disallowed expressions in a join condition would even get
+ *    here, or would already have raised an exception during phase-1 analysis.
+ * 3. Are all of the expressions handled by getStrExprProcessor allowed as
+ *    part of a join condition?
+ */ +public final class JoinCondnTypeCheckProcFactory { + + protected static final Log LOG = LogFactory + .getLog(JoinCondnTypeCheckProcFactory.class + .getName()); + + private JoinCondnTypeCheckProcFactory() { + // prevent instantiation + } + + public static Map genExprNode(ASTNode expr, + JoinTypeCheckCtx tcCtx) throws SemanticException { + // Create the walker, the rules dispatcher and the context. + // create a walker which walks the tree in a DFS manner while maintaining + // the operator stack. The dispatcher + // generates the plan from the operator tree + Map opRules = new LinkedHashMap(); + + opRules.put(new RuleRegExp("R1", HiveParser.TOK_NULL + "%"), + getNullExprProcessor()); + opRules.put(new RuleRegExp("R2", HiveParser.Number + "%|" + + HiveParser.TinyintLiteral + "%|" + HiveParser.SmallintLiteral + "%|" + + HiveParser.BigintLiteral + "%|" + HiveParser.DecimalLiteral + "%"), + getNumExprProcessor()); + opRules.put(new RuleRegExp("R3", HiveParser.Identifier + "%|" + + HiveParser.StringLiteral + "%|" + HiveParser.TOK_CHARSETLITERAL + + "%|" + HiveParser.TOK_STRINGLITERALSEQUENCE + "%|" + "%|" + + HiveParser.KW_IF + "%|" + HiveParser.KW_CASE + "%|" + + HiveParser.KW_WHEN + "%|" + HiveParser.KW_IN + "%|" + + HiveParser.KW_ARRAY + "%|" + HiveParser.KW_MAP + "%|" + + HiveParser.KW_STRUCT + "%|" + HiveParser.KW_EXISTS + "%|" + + HiveParser.TOK_SUBQUERY_OP_NOTIN + "%"), getStrExprProcessor()); + opRules.put(new RuleRegExp("R4", HiveParser.KW_TRUE + "%|" + + HiveParser.KW_FALSE + "%"), getBoolExprProcessor()); + opRules.put(new RuleRegExp("R5", HiveParser.TOK_DATELITERAL + "%"), + getDateExprProcessor()); + opRules.put(new RuleRegExp("R6", HiveParser.TOK_TABLE_OR_COL + "%"), + getColumnExprProcessor()); + opRules.put(new RuleRegExp("R7", HiveParser.TOK_SUBQUERY_OP + "%"), + getSubQueryExprProcessor()); + + // The dispatcher fires the processor corresponding to the closest matching + // rule and passes the context along + Dispatcher disp = new DefaultRuleDispatcher(getDefaultExprProcessor(), + opRules, tcCtx); + GraphWalker ogw = new DefaultGraphWalker(disp); + + // Create a list of topop nodes + ArrayList topNodes = new ArrayList(); + topNodes.add(expr); + HashMap nodeOutputs = new LinkedHashMap(); + ogw.startWalking(topNodes, nodeOutputs); + + return convert(nodeOutputs); + } + + // temporary type-safe casting + private static Map convert(Map outputs) { + Map converted = new LinkedHashMap(); + for (Map.Entry entry : outputs.entrySet()) { + if (entry.getKey() instanceof ASTNode + && (entry.getValue() == null || entry.getValue() instanceof ExprNodeDesc)) { + converted + .put((ASTNode) entry.getKey(), (ExprNodeDesc) entry.getValue()); + } else { + LOG.warn("Invalid type entry " + entry); + } + } + return converted; + } + + /** + * Processor for processing NULL expression. + */ + public static class NullExprProcessor implements NodeProcessor { + + @Override + public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, + Object... nodeOutputs) throws SemanticException { + + JoinTypeCheckCtx ctx = (JoinTypeCheckCtx) procCtx; + if (ctx.getError() != null) { + return null; + } + + return new ExprNodeNullDesc(); + } + + } + + /** + * Factory method to get NullExprProcessor. + * + * @return NullExprProcessor. + */ + public static NullExprProcessor getNullExprProcessor() { + return new NullExprProcessor(); + } + + /** + * Processor for processing numeric constants. 
+   */
+  public static class NumExprProcessor implements NodeProcessor {
+
+    @Override
+    public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx,
+        Object... nodeOutputs) throws SemanticException {
+
+      JoinTypeCheckCtx ctx = (JoinTypeCheckCtx) procCtx;
+      if (ctx.getError() != null) {
+        return null;
+      }
+
+      Number v = null;
+      ASTNode expr = (ASTNode) nd;
+      // The expression can be any one of Double, Long and Integer. We
+      // try to parse the expression in that order to ensure that the
+      // most specific type is used for conversion.
+      try {
+        if (expr.getText().endsWith("L")) {
+          // Literal bigint.
+          v = Long.valueOf(expr.getText().substring(0,
+              expr.getText().length() - 1));
+        } else if (expr.getText().endsWith("S")) {
+          // Literal smallint.
+          v = Short.valueOf(expr.getText().substring(0,
+              expr.getText().length() - 1));
+        } else if (expr.getText().endsWith("Y")) {
+          // Literal tinyint.
+          v = Byte.valueOf(expr.getText().substring(0,
+              expr.getText().length() - 1));
+        } else if (expr.getText().endsWith("BD")) {
+          // Literal decimal
+          String strVal = expr.getText().substring(0,
+              expr.getText().length() - 2);
+          HiveDecimal hd = HiveDecimal.create(strVal);
+          int prec = 1;
+          int scale = 0;
+          if (hd != null) {
+            prec = hd.precision();
+            scale = hd.scale();
+          }
+          DecimalTypeInfo typeInfo = TypeInfoFactory.getDecimalTypeInfo(prec,
+              scale);
+          return new ExprNodeConstantDesc(typeInfo, strVal);
+        } else {
+          // Each successful parse overwrites v; the first failure jumps to
+          // the catch block, leaving v at the most specific type parsed so far.
+          v = Double.valueOf(expr.getText());
+          v = Long.valueOf(expr.getText());
+          v = Integer.valueOf(expr.getText());
+        }
+      } catch (NumberFormatException e) {
+        // do nothing here, we will throw an exception in the following block
+      }
+      if (v == null) {
+        throw new SemanticException(
+            ErrorMsg.INVALID_NUMERICAL_CONSTANT.getMsg(expr));
+      }
+      return new ExprNodeConstantDesc(v);
+    }
+
+  }
+
+  /**
+   * Factory method to get NumExprProcessor.
+   *
+   * @return NumExprProcessor.
+   */
+  public static NumExprProcessor getNumExprProcessor() {
+    return new NumExprProcessor();
+  }
+
+  /**
+   * Processor for processing string constants.
+   */
+  public static class StrExprProcessor implements NodeProcessor {
+
+    @Override
+    public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx,
+        Object... nodeOutputs) throws SemanticException {
+
+      JoinTypeCheckCtx ctx = (JoinTypeCheckCtx) procCtx;
+      if (ctx.getError() != null) {
+        return null;
+      }
+
+      ASTNode expr = (ASTNode) nd;
+      String str = null;
+
+      switch (expr.getToken().getType()) {
+      case HiveParser.StringLiteral:
+        str = BaseSemanticAnalyzer.unescapeSQLString(expr.getText());
+        break;
+      case HiveParser.TOK_STRINGLITERALSEQUENCE:
+        StringBuilder sb = new StringBuilder();
+        for (Node n : expr.getChildren()) {
+          sb.append(BaseSemanticAnalyzer.unescapeSQLString(((ASTNode) n)
+              .getText()));
+        }
+        str = sb.toString();
+        break;
+      case HiveParser.TOK_CHARSETLITERAL:
+        str = BaseSemanticAnalyzer.charSetString(expr.getChild(0).getText(),
+            expr.getChild(1).getText());
+        break;
+      default:
+        // HiveParser.Identifier | HiveParser.KW_IF | HiveParser.KW_LEFT |
+        // HiveParser.KW_RIGHT
+        str = BaseSemanticAnalyzer.unescapeIdentifier(expr.getText());
+        break;
+      }
+      return new ExprNodeConstantDesc(TypeInfoFactory.stringTypeInfo, str);
+    }
+
+  }
+
+  /**
+   * Factory method to get StrExprProcessor.
+   *
+   * @return StrExprProcessor.
+   */
+  public static StrExprProcessor getStrExprProcessor() {
+    return new StrExprProcessor();
+  }
+
+  /**
+   * Processor for boolean constants.
+ */ + public static class BoolExprProcessor implements NodeProcessor { + + @Override + public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, + Object... nodeOutputs) throws SemanticException { + + JoinTypeCheckCtx ctx = (JoinTypeCheckCtx) procCtx; + if (ctx.getError() != null) { + return null; + } + + ASTNode expr = (ASTNode) nd; + Boolean bool = null; + + switch (expr.getToken().getType()) { + case HiveParser.KW_TRUE: + bool = Boolean.TRUE; + break; + case HiveParser.KW_FALSE: + bool = Boolean.FALSE; + break; + default: + assert false; + } + return new ExprNodeConstantDesc(TypeInfoFactory.booleanTypeInfo, bool); + } + + } + + /** + * Factory method to get BoolExprProcessor. + * + * @return BoolExprProcessor. + */ + public static BoolExprProcessor getBoolExprProcessor() { + return new BoolExprProcessor(); + } + + /** + * Processor for date constants. + */ + public static class DateExprProcessor implements NodeProcessor { + + @Override + public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, + Object... nodeOutputs) throws SemanticException { + + JoinTypeCheckCtx ctx = (JoinTypeCheckCtx) procCtx; + if (ctx.getError() != null) { + return null; + } + + ASTNode expr = (ASTNode) nd; + + // Get the string value and convert to a Date value. + try { + String dateString = BaseSemanticAnalyzer.stripQuotes(expr.getText()); + Date date = Date.valueOf(dateString); + return new ExprNodeConstantDesc(TypeInfoFactory.dateTypeInfo, date); + } catch (IllegalArgumentException err) { + throw new SemanticException( + "Unable to convert date literal string to date value.", err); + } + } + } + + /** + * Factory method to get DateExprProcessor. + * + * @return DateExprProcessor. + */ + public static DateExprProcessor getDateExprProcessor() { + return new DateExprProcessor(); + } + + /** + * Processor for table columns. + */ + public static class ColumnExprProcessor implements NodeProcessor { + + @Override + public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, + Object... nodeOutputs) throws SemanticException { + + JoinTypeCheckCtx ctx = (JoinTypeCheckCtx) procCtx; + if (ctx.getError() != null) { + return null; + } + + ASTNode expr = (ASTNode) nd; + ASTNode parent = stack.size() > 1 ? (ASTNode) stack.get(stack.size() - 2) + : null; + + if (expr.getType() != HiveParser.TOK_TABLE_OR_COL) { + ctx.setError(ErrorMsg.INVALID_COLUMN.getMsg(expr), expr); + return null; + } + + assert (expr.getChildCount() == 1); + String tableOrCol = BaseSemanticAnalyzer.unescapeIdentifier(expr + .getChild(0).getText()); + + boolean qualifiedAccess = (parent != null && parent.getType() == HiveParser.DOT); + + ColumnInfo colInfo = null; + if (!qualifiedAccess) { + colInfo = getColInfo(ctx, null, tableOrCol, expr); + // It's a column. + return new ExprNodeColumnDesc(colInfo.getType(), + colInfo.getInternalName(), colInfo.getTabAlias(), + colInfo.getIsVirtualCol()); + } else if (hasTableAlias(ctx, tableOrCol, expr)) { + return null; + } else { + ctx.setError(ErrorMsg.INVALID_COLUMN.getMsg(expr), expr); + return null; + } + } + } + + /** + * Factory method to get ColumnExprProcessor. + * + * @return ColumnExprProcessor. + */ + public static ColumnExprProcessor getColumnExprProcessor() { + return new ColumnExprProcessor(); + } + + /** + * The default processor for typechecking. 
+ */ + public static class DefaultExprProcessor implements NodeProcessor { + + static HashMap specialUnaryOperatorTextHashMap; + static HashMap specialFunctionTextHashMap; + static HashMap conversionFunctionTextHashMap; + static HashSet windowingTokens; + static { + specialUnaryOperatorTextHashMap = new HashMap(); + specialUnaryOperatorTextHashMap.put(HiveParser.PLUS, "positive"); + specialUnaryOperatorTextHashMap.put(HiveParser.MINUS, "negative"); + specialFunctionTextHashMap = new HashMap(); + specialFunctionTextHashMap.put(HiveParser.TOK_ISNULL, "isnull"); + specialFunctionTextHashMap.put(HiveParser.TOK_ISNOTNULL, "isnotnull"); + conversionFunctionTextHashMap = new HashMap(); + conversionFunctionTextHashMap.put(HiveParser.TOK_BOOLEAN, + serdeConstants.BOOLEAN_TYPE_NAME); + conversionFunctionTextHashMap.put(HiveParser.TOK_TINYINT, + serdeConstants.TINYINT_TYPE_NAME); + conversionFunctionTextHashMap.put(HiveParser.TOK_SMALLINT, + serdeConstants.SMALLINT_TYPE_NAME); + conversionFunctionTextHashMap.put(HiveParser.TOK_INT, + serdeConstants.INT_TYPE_NAME); + conversionFunctionTextHashMap.put(HiveParser.TOK_BIGINT, + serdeConstants.BIGINT_TYPE_NAME); + conversionFunctionTextHashMap.put(HiveParser.TOK_FLOAT, + serdeConstants.FLOAT_TYPE_NAME); + conversionFunctionTextHashMap.put(HiveParser.TOK_DOUBLE, + serdeConstants.DOUBLE_TYPE_NAME); + conversionFunctionTextHashMap.put(HiveParser.TOK_STRING, + serdeConstants.STRING_TYPE_NAME); + conversionFunctionTextHashMap.put(HiveParser.TOK_CHAR, + serdeConstants.CHAR_TYPE_NAME); + conversionFunctionTextHashMap.put(HiveParser.TOK_VARCHAR, + serdeConstants.VARCHAR_TYPE_NAME); + conversionFunctionTextHashMap.put(HiveParser.TOK_BINARY, + serdeConstants.BINARY_TYPE_NAME); + conversionFunctionTextHashMap.put(HiveParser.TOK_DATE, + serdeConstants.DATE_TYPE_NAME); + conversionFunctionTextHashMap.put(HiveParser.TOK_TIMESTAMP, + serdeConstants.TIMESTAMP_TYPE_NAME); + conversionFunctionTextHashMap.put(HiveParser.TOK_DECIMAL, + serdeConstants.DECIMAL_TYPE_NAME); + + windowingTokens = new HashSet(); + windowingTokens.add(HiveParser.KW_OVER); + windowingTokens.add(HiveParser.TOK_PARTITIONINGSPEC); + windowingTokens.add(HiveParser.TOK_DISTRIBUTEBY); + windowingTokens.add(HiveParser.TOK_SORTBY); + windowingTokens.add(HiveParser.TOK_CLUSTERBY); + windowingTokens.add(HiveParser.TOK_WINDOWSPEC); + windowingTokens.add(HiveParser.TOK_WINDOWRANGE); + windowingTokens.add(HiveParser.TOK_WINDOWVALUES); + windowingTokens.add(HiveParser.KW_UNBOUNDED); + windowingTokens.add(HiveParser.KW_PRECEDING); + windowingTokens.add(HiveParser.KW_FOLLOWING); + windowingTokens.add(HiveParser.KW_CURRENT); + windowingTokens.add(HiveParser.TOK_TABSORTCOLNAMEASC); + windowingTokens.add(HiveParser.TOK_TABSORTCOLNAMEDESC); + } + + private static boolean isRedundantConversionFunction(ASTNode expr, + boolean isFunction, ArrayList children) { + if (!isFunction) { + return false; + } + // conversion functions take a single parameter + if (children.size() != 1) { + return false; + } + String funcText = conversionFunctionTextHashMap.get(((ASTNode) expr + .getChild(0)).getType()); + // not a conversion function + if (funcText == null) { + return false; + } + // return true when the child type and the conversion target type is the + // same + return ((PrimitiveTypeInfo) children.get(0).getTypeInfo()).getTypeName() + .equalsIgnoreCase(funcText); + } + + public static String getFunctionText(ASTNode expr, boolean isFunction) { + String funcText = null; + if (!isFunction) { + // For operator, the function name is 
the operator text, unless it's in
+      // our special dictionary
+      if (expr.getChildCount() == 1) {
+        funcText = specialUnaryOperatorTextHashMap.get(expr.getType());
+      }
+      if (funcText == null) {
+        funcText = expr.getText();
+      }
+    } else {
+      // For TOK_FUNCTION, the function name is stored in the first child,
+      // unless it's in our special dictionary.
+      assert (expr.getChildCount() >= 1);
+      int funcType = ((ASTNode) expr.getChild(0)).getType();
+      funcText = specialFunctionTextHashMap.get(funcType);
+      if (funcText == null) {
+        funcText = conversionFunctionTextHashMap.get(funcType);
+      }
+      if (funcText == null) {
+        funcText = ((ASTNode) expr.getChild(0)).getText();
+      }
+    }
+    return BaseSemanticAnalyzer.unescapeIdentifier(funcText);
+    }
+
+    /**
+     * This function creates an ExprNodeDesc for a UDF function given the
+     * children (arguments). It will insert implicit type conversion functions
+     * if necessary.
+     *
+     * @throws UDFArgumentException
+     */
+    static ExprNodeDesc getFuncExprNodeDescWithUdfData(String udfName,
+        TypeInfo typeInfo, ExprNodeDesc... children)
+        throws UDFArgumentException {
+
+      FunctionInfo fi = FunctionRegistry.getFunctionInfo(udfName);
+      if (fi == null) {
+        throw new UDFArgumentException(udfName + " not found.");
+      }
+
+      GenericUDF genericUDF = fi.getGenericUDF();
+      if (genericUDF == null) {
+        throw new UDFArgumentException(udfName
+            + " is an aggregation function or a table function.");
+      }
+
+      // Add udfData to UDF if necessary
+      if (typeInfo != null) {
+        if (genericUDF instanceof SettableUDF) {
+          ((SettableUDF) genericUDF).setTypeInfo(typeInfo);
+        }
+      }
+
+      List childrenList = new ArrayList(
+          children.length);
+      childrenList.addAll(Arrays.asList(children));
+      return ExprNodeGenericFuncDesc.newInstance(genericUDF, childrenList);
+    }
+
+    public static ExprNodeDesc getFuncExprNodeDesc(String udfName,
+        ExprNodeDesc... children) throws UDFArgumentException {
+      return getFuncExprNodeDescWithUdfData(udfName, null, children);
+    }
+
+    static ExprNodeDesc getXpathOrFuncExprNodeDesc(ASTNode expr,
+        boolean isFunction, ArrayList children,
+        JoinTypeCheckCtx ctx) throws SemanticException, UDFArgumentException {
+      // return the child directly if the conversion is redundant.
+      if (isRedundantConversionFunction(expr, isFunction, children)) {
+        assert (children.size() == 1);
+        assert (children.get(0) != null);
+        return children.get(0);
+      }
+      String funcText = getFunctionText(expr, isFunction);
+      ExprNodeDesc desc;
+      if (funcText.equals(".")) {
+        throw new SemanticException(ErrorMsg.INVALID_FUNCTION.getMsg(expr));
+      } else if (funcText.equals("[")) {
+        // "[]" : LSQUARE/INDEX Expression
+        throw new SemanticException(ErrorMsg.INVALID_FUNCTION.getMsg(expr));
+      } else {
+        // other operators or functions
+        FunctionInfo fi = FunctionRegistry.getFunctionInfo(funcText);
+
+        if (fi == null) {
+          if (isFunction) {
+            throw new SemanticException(
+                ErrorMsg.INVALID_FUNCTION.getMsg((ASTNode) expr.getChild(0)));
+          } else {
+            throw new SemanticException(ErrorMsg.INVALID_FUNCTION.getMsg(expr));
+          }
+        }
+
+        // getGenericUDF() actually clones the UDF. Just call it once and reuse.
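+        // A bare OR at the top of a join condition cannot be turned into an
+        // equi-join key, which is presumably why it is rejected outright
+        // below, before any further checking.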
+      GenericUDF genericUDF = fi.getGenericUDF();
+
+      if (genericUDF instanceof GenericUDFOPOr) {
+        throw new SemanticException(
+            ErrorMsg.INVALID_FUNCTION.getMsg((ASTNode) expr.getChild(0)));
+      }
+
+      // Handle type casts that may contain type parameters
+      if (isFunction) {
+        ASTNode funcNameNode = (ASTNode) expr.getChild(0);
+        switch (funcNameNode.getType()) {
+        case HiveParser.TOK_CHAR:
+          // Add type params
+          CharTypeInfo charTypeInfo = ParseUtils.getCharTypeInfo(funcNameNode);
+          if (genericUDF != null) {
+            ((SettableUDF) genericUDF).setTypeInfo(charTypeInfo);
+          }
+          break;
+        case HiveParser.TOK_VARCHAR:
+          VarcharTypeInfo varcharTypeInfo = ParseUtils.getVarcharTypeInfo(funcNameNode);
+          if (genericUDF != null) {
+            ((SettableUDF) genericUDF).setTypeInfo(varcharTypeInfo);
+          }
+          break;
+        case HiveParser.TOK_DECIMAL:
+          DecimalTypeInfo decTypeInfo = ParseUtils.getDecimalTypeTypeInfo(funcNameNode);
+          if (genericUDF != null) {
+            ((SettableUDF) genericUDF).setTypeInfo(decTypeInfo);
+          }
+          break;
+        default:
+          // Do nothing
+          break;
+        }
+      }
+
+      // Join Condition cannot contain UDTF
+      if (fi.getGenericUDTF() != null) {
+        throw new SemanticException(ErrorMsg.UDTF_INVALID_LOCATION.getMsg());
+      }
+
+      // UDAF in filter condition, group-by clause, param of function, etc.
+      if (fi.isGenericUDAF()) {
+        if (isFunction) {
+          throw new SemanticException(
+              ErrorMsg.UDAF_INVALID_LOCATION.getMsg((ASTNode) expr.getChild(0)));
+        } else {
+          throw new SemanticException(ErrorMsg.UDAF_INVALID_LOCATION.getMsg(expr));
+        }
+      }
+
+      if (genericUDF != null) {
+        if (FunctionRegistry.isStateful(genericUDF)) {
+          throw new SemanticException(ErrorMsg.UDF_STATEFUL_INVALID_LOCATION.getMsg());
+        }
+      }
+
+      if (!(genericUDF instanceof GenericUDFOPAnd)) {
+        if (!(genericUDF instanceof GenericUDFBaseCompare)) {
+          if (genericUDFargsRefersToBothInput(genericUDF, children, ctx.getInputRRList())) {
+            ctx.setError(ErrorMsg.INVALID_JOIN_CONDITION_1.getMsg(expr), expr);
+          }
+        } else if (genericUDF instanceof GenericUDFBaseCompare) {
+          if (children.size() == 2
+              && !(children.get(0) instanceof ExprNodeConstantDesc)
+              && !(children.get(1) instanceof ExprNodeConstantDesc)) {
+            if (comparisonUDFargsRefersToBothInput((GenericUDFBaseCompare) genericUDF,
+                children, ctx.getInputRRList())) {
+              ctx.setError(ErrorMsg.INVALID_JOIN_CONDITION_1.getMsg(expr), expr);
+              return null;
+            }
+
+            if (argsRefersToNeither((GenericUDFBaseCompare) genericUDF,
+                children, ctx.getInputRRList())) {
+              ctx.setError(ErrorMsg.INVALID_JOIN_CONDITION_2.getMsg(expr), expr);
+              return null;
+            }
+
+            if (!(genericUDF instanceof GenericUDFOPEqual)) {
+              ctx.setError(ErrorMsg.INVALID_FUNCTION.getMsg(expr), expr);
+              return null;
+            }
+          } else if (children.size() == 2
+              && ((children.get(0) instanceof ExprNodeConstantDesc
+                  && children.get(1) instanceof ExprNodeColumnDesc)
+                  || (children.get(0) instanceof ExprNodeColumnDesc
+                  && children.get(1) instanceof ExprNodeConstantDesc))) {
+            int constIdx = children.get(0) instanceof ExprNodeConstantDesc ? 0 : 1;
+
+            Set<String> inferTypes = new HashSet<String>(Arrays.asList(
+                serdeConstants.TINYINT_TYPE_NAME.toLowerCase(),
+                serdeConstants.SMALLINT_TYPE_NAME.toLowerCase(),
+                serdeConstants.INT_TYPE_NAME.toLowerCase(),
+                serdeConstants.BIGINT_TYPE_NAME.toLowerCase(),
+                serdeConstants.FLOAT_TYPE_NAME.toLowerCase(),
+                serdeConstants.DOUBLE_TYPE_NAME.toLowerCase(),
+                serdeConstants.STRING_TYPE_NAME.toLowerCase()));
+
+            String constType = children.get(constIdx).getTypeString().toLowerCase();
+            String columnType = children.get(1 - constIdx).getTypeString().toLowerCase();
+
+            if (inferTypes.contains(constType) && inferTypes.contains(columnType)
+                && !columnType.equalsIgnoreCase(constType)) {
+              Object originalValue = ((ExprNodeConstantDesc) children.get(constIdx)).getValue();
+              String constValue = originalValue.toString();
+              boolean triedDouble = false;
+              Number value = null;
+              try {
+                if (columnType.equalsIgnoreCase(serdeConstants.TINYINT_TYPE_NAME)) {
+                  value = new Byte(constValue);
+                } else if (columnType.equalsIgnoreCase(serdeConstants.SMALLINT_TYPE_NAME)) {
+                  value = new Short(constValue);
+                } else if (columnType.equalsIgnoreCase(serdeConstants.INT_TYPE_NAME)) {
+                  value = new Integer(constValue);
+                } else if (columnType.equalsIgnoreCase(serdeConstants.BIGINT_TYPE_NAME)) {
+                  value = new Long(constValue);
+                } else if (columnType.equalsIgnoreCase(serdeConstants.FLOAT_TYPE_NAME)) {
+                  value = new Float(constValue);
+                } else if (columnType.equalsIgnoreCase(serdeConstants.DOUBLE_TYPE_NAME)) {
+                  triedDouble = true;
+                  value = new Double(constValue);
+                } else if (columnType.equalsIgnoreCase(serdeConstants.STRING_TYPE_NAME)) {
+                  // Don't scramble the const type information if comparing to a
+                  // string column; it's not useful to do so. As of now, there is
+                  // also a hack in SemanticAnalyzer#genTablePlan that causes every
+                  // column to look like a string down here, so number type
+                  // information is always lost otherwise.
+                  boolean isNumber = (originalValue instanceof Number);
+                  triedDouble = !isNumber;
+                  value = isNumber ? (Number) originalValue : new Double(constValue);
+                }
+              } catch (NumberFormatException nfe) {
+                // This exception suggests the precise type inference did not
+                // succeed; we'll try again to convert it to double. However, if we
+                // already tried this, or the column is NUMBER type and the
+                // operator is EQUAL, return false due to the type mismatch.
+                if (triedDouble
+                    || (genericUDF instanceof GenericUDFOPEqual
+                        && !columnType.equals(serdeConstants.STRING_TYPE_NAME))) {
+                  return new ExprNodeConstantDesc(false);
+                }
+
+                try {
+                  value = new Double(constValue);
+                } catch (NumberFormatException ex) {
+                  return new ExprNodeConstantDesc(false);
+                }
+              }
+
+              if (value != null) {
+                children.set(constIdx, new ExprNodeConstantDesc(value));
+              }
+            }
+          }
+        }
+      }
+
+      desc = ExprNodeGenericFuncDesc.newInstance(genericUDF, funcText,
+          children);
+    }
+    // UDFOPPositive is a no-op.
+    // However, we still create it, and then remove it here, to make sure we
+    // only allow "+" for numeric types.
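To make the constant-type-inference block above concrete: a standalone sketch (a hypothetical helper, not the patch's code path) of the same rewrite for a bigint column. The real code additionally consults the comparison operator and column type before folding to a constant false:

    import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc;

    public class ConstantInferenceSketch {
      // Re-type a string literal compared against a bigint column so the
      // comparison stays numeric instead of degrading to double/string rules.
      static ExprNodeConstantDesc inferForBigintColumn(String constValue) {
        try {
          return new ExprNodeConstantDesc(Long.valueOf(constValue)); // precise type
        } catch (NumberFormatException nfe) {
          try {
            return new ExprNodeConstantDesc(Double.valueOf(constValue)); // fallback
          } catch (NumberFormatException ex) {
            return new ExprNodeConstantDesc(false); // can never compare equal
          }
        }
      }

      public static void main(String[] args) {
        System.out.println(inferForBigintColumn("100").getValue()); // 100 (Long)
        System.out.println(inferForBigintColumn("1e2").getValue()); // 100.0 (Double)
        System.out.println(inferForBigintColumn("abc").getValue()); // false
      }
    }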
+ if (FunctionRegistry.isOpPositive(desc)) { + assert (desc.getChildren().size() == 1); + desc = desc.getChildren().get(0); + } + assert (desc != null); + return desc; + } + + private static boolean genericUDFargsRefersToBothInput(GenericUDF udf, + ArrayList children, List inputRRList) { + boolean argsRefersToBothInput = false; + + Map hasCodeToColDescMap = new HashMap(); + for (ExprNodeDesc child : children) { + ExprNodeDescUtils.getExprNodeColumnDesc(child, hasCodeToColDescMap); + } + Set inputRef = getInputRef(hasCodeToColDescMap.values(), inputRRList); + + if (inputRef.size() > 1) + argsRefersToBothInput = true; + + return argsRefersToBothInput; + } + + private static boolean comparisonUDFargsRefersToBothInput( + GenericUDFBaseCompare comparisonUDF, ArrayList children, + List inputRRList) { + boolean argsRefersToBothInput = false; + + Map lhsHashCodeToColDescMap = new HashMap(); + Map rhsHashCodeToColDescMap = new HashMap(); + ExprNodeDescUtils.getExprNodeColumnDesc(children.get(0), lhsHashCodeToColDescMap); + ExprNodeDescUtils.getExprNodeColumnDesc(children.get(1), rhsHashCodeToColDescMap); + Set lhsInputRef = getInputRef(lhsHashCodeToColDescMap.values(), inputRRList); + Set rhsInputRef = getInputRef(rhsHashCodeToColDescMap.values(), inputRRList); + + if (lhsInputRef.size() > 1 || rhsInputRef.size() > 1) + argsRefersToBothInput = true; + + return argsRefersToBothInput; + } + + private static boolean argsRefersToNeither( + GenericUDFBaseCompare comparisonUDF, ArrayList children, + List inputRRList) { + boolean argsRefersToNeither = false; + + Map lhsHashCodeToColDescMap = new HashMap(); + Map rhsHashCodeToColDescMap = new HashMap(); + ExprNodeDescUtils.getExprNodeColumnDesc(children.get(0), lhsHashCodeToColDescMap); + ExprNodeDescUtils.getExprNodeColumnDesc(children.get(1), rhsHashCodeToColDescMap); + Set lhsInputRef = getInputRef(lhsHashCodeToColDescMap.values(), inputRRList); + Set rhsInputRef = getInputRef(rhsHashCodeToColDescMap.values(), inputRRList); + + if (lhsInputRef.size() == 0 && rhsInputRef.size() == 0) + argsRefersToNeither = true; + + return argsRefersToNeither; + } + + private static Set getInputRef(Collection colDescSet, + List inputRRList) { + String tableAlias; + RowResolver inputRR; + Set inputLineage = new HashSet(); + + for (ExprNodeDesc col : colDescSet) { + ExprNodeColumnDesc colDesc = (ExprNodeColumnDesc) col; + tableAlias = colDesc.getTabAlias(); + + for (int i = 0; i < inputRRList.size(); i++) { + inputRR = inputRRList.get(i); + + // If table Alias is present check if InputRR has that table and then check for internal name + // else if table alias is null then check with internal name in all inputRR. + if (tableAlias != null) { + if (inputRR.hasTableAlias(tableAlias)) { + if (inputRR.getInvRslvMap().containsKey(colDesc.getColumn())) { + inputLineage.add(i); + } + } + } else { + if (inputRR.getInvRslvMap().containsKey(colDesc.getColumn())) { + inputLineage.add(i); + } + } + } + } + + return inputLineage; + } + + @Override + public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, + Object... 
nodeOutputs) throws SemanticException { + + JoinTypeCheckCtx ctx = (JoinTypeCheckCtx) procCtx; + + if (ctx.getError() != null) { + return null; + } + + ASTNode expr = (ASTNode) nd; + + /* + * Windowing is not supported in Join Condition + */ + if (windowingTokens.contains(expr.getType())) { + ctx.setError(ErrorMsg.INVALID_FUNCTION.getMsg(expr, + "Windowing is not supported in Join Condition"), expr); + + return null; + } + + if (expr.getType() == HiveParser.TOK_TABNAME) { + return null; + } + + if (expr.getType() == HiveParser.TOK_ALLCOLREF) { + ctx.setError(ErrorMsg.INVALID_COLUMN.getMsg(expr, + "Join Condition does not support * syntax"), expr); + + return null; + } + + // If the first child is a TOK_TABLE_OR_COL, and nodeOutput[0] is NULL, + // and the operator is a DOT, then it's a table column reference. + if (expr.getType() == HiveParser.DOT + && expr.getChild(0).getType() == HiveParser.TOK_TABLE_OR_COL + && nodeOutputs[0] == null) { + + String tableAlias = BaseSemanticAnalyzer.unescapeIdentifier(expr + .getChild(0).getChild(0).getText()); + // NOTE: tableAlias must be a valid non-ambiguous table alias, + // because we've checked that in TOK_TABLE_OR_COL's process method. + ColumnInfo colInfo = getColInfo(ctx, tableAlias, + ((ExprNodeConstantDesc) nodeOutputs[1]).getValue().toString(), expr); + + if (colInfo == null) { + ctx.setError(ErrorMsg.INVALID_COLUMN.getMsg(expr.getChild(1)), expr); + return null; + } + return new ExprNodeColumnDesc(colInfo.getType(), + colInfo.getInternalName(), tableAlias, colInfo.getIsVirtualCol()); + } + + // Return nulls for conversion operators + if (conversionFunctionTextHashMap.keySet().contains(expr.getType()) + || specialFunctionTextHashMap.keySet().contains(expr.getType()) + || expr.getToken().getType() == HiveParser.CharSetName + || expr.getToken().getType() == HiveParser.CharSetLiteral) { + return null; + } + + boolean isFunction = (expr.getType() == HiveParser.TOK_FUNCTION + || expr.getType() == HiveParser.TOK_FUNCTIONSTAR || expr.getType() == HiveParser.TOK_FUNCTIONDI); + + // Create all children + int childrenBegin = (isFunction ? 
1 : 0);
+      ArrayList<ExprNodeDesc> children = new ArrayList<ExprNodeDesc>(
+          expr.getChildCount() - childrenBegin);
+      for (int ci = childrenBegin; ci < expr.getChildCount(); ci++) {
+        if (nodeOutputs[ci] instanceof ExprNodeColumnListDesc) {
+          children.addAll(((ExprNodeColumnListDesc) nodeOutputs[ci]).getChildren());
+        } else {
+          children.add((ExprNodeDesc) nodeOutputs[ci]);
+        }
+      }
+
+      if (expr.getType() == HiveParser.TOK_FUNCTIONSTAR) {
+        ctx.setError(ErrorMsg.INVALID_COLUMN.getMsg(expr.getChild(1)), expr);
+        return null;
+      }
+
+      // If any of the children contains null, then return a null
+      // this is a hack for now to handle the group by case
+      if (children.contains(null)) {
+        List<String> possibleColumnNames = getReferenceableColumnAliases(ctx);
+        String reason = String.format("(possible column names are: %s)",
+            StringUtils.join(possibleColumnNames, ", "));
+        ctx.setError(ErrorMsg.INVALID_COLUMN.getMsg(expr.getChild(0), reason), expr);
+        return null;
+      }
+
+      // Create function desc
+      try {
+        return getXpathOrFuncExprNodeDesc(expr, isFunction, children, ctx);
+      } catch (UDFArgumentTypeException e) {
+        throw new SemanticException(ErrorMsg.INVALID_ARGUMENT_TYPE.getMsg(
+            expr.getChild(childrenBegin + e.getArgumentId()), e.getMessage()));
+      } catch (UDFArgumentLengthException e) {
+        throw new SemanticException(ErrorMsg.INVALID_ARGUMENT_LENGTH.getMsg(
+            expr, e.getMessage()));
+      } catch (UDFArgumentException e) {
+        throw new SemanticException(ErrorMsg.INVALID_ARGUMENT.getMsg(expr,
+            e.getMessage()));
+      }
+    }
+
+  }
+
+  /**
+   * Factory method to get DefaultExprProcessor.
+   *
+   * @return DefaultExprProcessor.
+   */
+  public static DefaultExprProcessor getDefaultExprProcessor() {
+    return new DefaultExprProcessor();
+  }
+
+  /**
+   * Processor for subquery expressions.
+   */
+  public static class SubQueryExprProcessor implements NodeProcessor {
+
+    @Override
+    public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
+        Object... nodeOutputs) throws SemanticException {
+
+      JoinTypeCheckCtx ctx = (JoinTypeCheckCtx) procCtx;
+      if (ctx.getError() != null) {
+        return null;
+      }
+
+      ASTNode expr = (ASTNode) nd;
+      ASTNode sqNode = (ASTNode) expr.getParent().getChild(1);
+      /*
+       * Restriction.1.h :: SubQueries not supported in Join Condition.
+       */
+      ctx.setError(ErrorMsg.UNSUPPORTED_SUBQUERY_EXPRESSION.getMsg(sqNode,
+          "SubQuery expressions are not supported in Join Condition"), sqNode);
+      return null;
+    }
+  }
+
+  /**
+   * Factory method to get SubQueryExprProcessor.
+   *
+   * @return SubQueryExprProcessor.
+   */
+  public static SubQueryExprProcessor getSubQueryExprProcessor() {
+    return new SubQueryExprProcessor();
+  }
+
+  private static boolean hasTableAlias(JoinTypeCheckCtx ctx, String tabName,
+      ASTNode expr) throws SemanticException {
+    int tblAliasCnt = 0;
+    for (RowResolver rr : ctx.getInputRRList()) {
+      if (rr.hasTableAlias(tabName))
+        tblAliasCnt++;
+    }
+
+    if (tblAliasCnt > 1) {
+      throw new SemanticException(ErrorMsg.INVALID_JOIN_CONDITION_1.getMsg(expr));
+    }
+
+    return (tblAliasCnt == 1) ?
true : false; + } + + private static ColumnInfo getColInfo(JoinTypeCheckCtx ctx, String tabName, + String colAlias, ASTNode expr) throws SemanticException { + ColumnInfo tmp; + ColumnInfo cInfoToRet = null; + + for (RowResolver rr : ctx.getInputRRList()) { + tmp = rr.get(tabName, colAlias); + if (tmp != null) { + if (cInfoToRet != null) { + throw new SemanticException( + ErrorMsg.INVALID_JOIN_CONDITION_1.getMsg(expr)); + } + cInfoToRet = tmp; + } + } + + return cInfoToRet; + } + + private static List getReferenceableColumnAliases(JoinTypeCheckCtx ctx) { + List possibleColumnNames = new ArrayList(); + for (RowResolver rr : ctx.getInputRRList()) { + possibleColumnNames.addAll(rr.getReferenceableColumnAliases(null, -1)); + } + + return possibleColumnNames; + } +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/JoinTypeCheckCtx.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/JoinTypeCheckCtx.java new file mode 100644 index 0000000..eb4e87c --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/JoinTypeCheckCtx.java @@ -0,0 +1,54 @@ +package org.apache.hadoop.hive.ql.parse; + +import java.util.Arrays; +import java.util.List; + +import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx; + +import com.google.common.collect.ImmutableList; + +public class JoinTypeCheckCtx implements NodeProcessorCtx { + /** + * Potential typecheck error reason. + */ + private String error; + + /** + * The node that generated the potential typecheck error + */ + private ASTNode errorSrcNode; + + private final ImmutableList m_inputRRLst; + + public JoinTypeCheckCtx(RowResolver... inputRRLst) { + m_inputRRLst = new ImmutableList.Builder().addAll(Arrays.asList(inputRRLst)).build(); + } + + /** + * @return the inputRR List + */ + public List getInputRRList() { + return m_inputRRLst; + } + + /** + * @param error + * the error to set + * + */ + public void setError(String error, ASTNode errorSrcNode) { + this.error = error; + this.errorSrcNode = errorSrcNode; + } + + /** + * @return the error + */ + public String getError() { + return error; + } + + public ASTNode getErrorSrcNode() { + return errorSrcNode; + } +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseDriver.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseDriver.java index 52c39c0..a24cad9 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseDriver.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseDriver.java @@ -131,7 +131,7 @@ public String getErrorMessage(RecognitionException e, String[] tokenNames) { * so that the graph walking algorithms and the rules framework defined in * ql.lib can be used with the AST Nodes. */ - static final TreeAdaptor adaptor = new CommonTreeAdaptor() { + public static final TreeAdaptor adaptor = new CommonTreeAdaptor() { /** * Creates an ASTNode for the given token. The ASTNode is a wrapper around * antlr's CommonTree class that implements the Node interface. 
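A minimal sketch of how the JoinTypeCheckCtx defined above is meant to be driven (mirroring the call SemanticAnalyzer makes later in this patch). leftRR/rightRR are assumed to be the RowResolvers of the two join inputs, and JoinCondnTypeCheckProcFactory is assumed visible from the same org.apache.hadoop.hive.ql.parse package:

    import java.util.Map;

    import org.apache.hadoop.hive.ql.parse.ASTNode;
    import org.apache.hadoop.hive.ql.parse.JoinTypeCheckCtx;
    import org.apache.hadoop.hive.ql.parse.RowResolver;
    import org.apache.hadoop.hive.ql.parse.SemanticException;
    import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;

    public class JoinCondnTypeCheckSketch {
      /** Typechecks a join condition against the schemas of the two join inputs. */
      static ExprNodeDesc checkJoinCondn(ASTNode joinCondAST, RowResolver leftRR,
          RowResolver rightRR) throws SemanticException {
        JoinTypeCheckCtx ctx = new JoinTypeCheckCtx(leftRR, rightRR);
        Map<ASTNode, ExprNodeDesc> nodes =
            JoinCondnTypeCheckProcFactory.genExprNode(joinCondAST, ctx);
        if (ctx.getError() != null) {
          // join-specific violations are recorded on the ctx rather than thrown
          throw new SemanticException(ctx.getError());
        }
        return nodes.get(joinCondAST);
      }
    }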
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseUtils.java
index 0a1c660..208f133 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseUtils.java
@@ -110,7 +110,7 @@ private ParseUtils() {
    * @param tableFieldTypeInfo TypeInfo to convert to
    * @return Expression converting column to the type specified by tableFieldTypeInfo
    */
-  static ExprNodeDesc createConversionCast(ExprNodeDesc column, PrimitiveTypeInfo tableFieldTypeInfo)
+  public static ExprNodeDesc createConversionCast(ExprNodeDesc column, PrimitiveTypeInfo tableFieldTypeInfo)
       throws SemanticException {
     // Get base type, since type string may be parameterized
     String baseType = TypeInfoUtils.getBaseName(tableFieldTypeInfo.getTypeName());
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/RowResolver.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/RowResolver.java
index 2e58b80..22295a1 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/RowResolver.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/RowResolver.java
@@ -351,4 +351,72 @@ public void setExpressionMap(Map expressionMap) {
     this.expressionMap = expressionMap;
   }
+
+  // TODO: 1) How to handle collisions? 2) Should we be cloning ColumnInfo or
+  // not?
+  public static int add(RowResolver rrToAddTo, RowResolver rrToAddFrom,
+      int outputColPos, int numColumns) throws SemanticException {
+    String tabAlias;
+    String colAlias;
+    String[] qualifiedColName;
+    int i = 0;
+
+    for (ColumnInfo cInfoFrmInput : rrToAddFrom.getRowSchema().getSignature()) {
+      if (numColumns >= 0 && i == numColumns) {
+        break;
+      }
+      ColumnInfo newCI = null;
+      qualifiedColName = rrToAddFrom.getInvRslvMap().get(
+          cInfoFrmInput.getInternalName());
+      tabAlias = qualifiedColName[0];
+      colAlias = qualifiedColName[1];
+
+      newCI = new ColumnInfo(cInfoFrmInput);
+      newCI.setInternalName(SemanticAnalyzer
+          .getColumnInternalName(outputColPos));
+
+      outputColPos++;
+
+      if (rrToAddTo.get(tabAlias, colAlias) != null)
+        throw new RuntimeException("Ambiguous Column Names");
+
+      rrToAddTo.put(tabAlias, colAlias, newCI);
+
+      qualifiedColName = rrToAddFrom.getAlternateMappings(cInfoFrmInput
+          .getInternalName());
+      if (qualifiedColName != null) {
+        tabAlias = qualifiedColName[0];
+        colAlias = qualifiedColName[1];
+        rrToAddTo.put(tabAlias, colAlias, newCI);
+      }
+      i++;
+    }
+
+    return outputColPos;
+  }
+
+  public static int add(RowResolver rrToAddTo, RowResolver rrToAddFrom,
+      int outputColPos) throws SemanticException {
+    return add(rrToAddTo, rrToAddFrom, outputColPos, -1);
+  }
+
+  /**
+   * Return a new row resolver that is a combination of left RR and right RR.
+ * The schema will be schema of left, schema of right + * + * @param leftRR + * @param rightRR + * @return + * @throws SemanticException + */ + public static RowResolver getCombinedRR(RowResolver leftRR, + RowResolver rightRR) throws SemanticException { + int outputColPos = 0; + + RowResolver combinedRR = new RowResolver(); + outputColPos = add(combinedRR, leftRR, outputColPos); + outputColPos = add(combinedRR, rightRR, outputColPos); + + return combinedRR; + } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java index e4a30a2..5d41d22 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java @@ -22,8 +22,10 @@ import java.io.IOException; import java.io.Serializable; +import java.math.BigDecimal; import java.util.ArrayList; import java.util.Arrays; +import java.util.BitSet; import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; @@ -34,9 +36,14 @@ import java.util.Set; import java.util.TreeSet; import java.util.UUID; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; import java.util.regex.Pattern; import java.util.regex.PatternSyntaxException; +import net.hydromatic.optiq.SchemaPlus; +import net.hydromatic.optiq.tools.Frameworks; + import org.antlr.runtime.tree.Tree; import org.antlr.runtime.tree.TreeWizard; import org.antlr.runtime.tree.TreeWizard.ContextVisitor; @@ -104,6 +111,26 @@ import org.apache.hadoop.hive.ql.metadata.VirtualColumn; import org.apache.hadoop.hive.ql.optimizer.Optimizer; import org.apache.hadoop.hive.ql.optimizer.unionproc.UnionProcContext; +import org.apache.hadoop.hive.ql.optimizer.optiq.HiveDefaultRelMetadataProvider; +import org.apache.hadoop.hive.ql.optimizer.optiq.HiveOptiqUtil; +import org.apache.hadoop.hive.ql.optimizer.optiq.Pair; +import org.apache.hadoop.hive.ql.optimizer.optiq.RelOptHiveTable; +import org.apache.hadoop.hive.ql.optimizer.optiq.TraitsUtil; +import org.apache.hadoop.hive.ql.optimizer.optiq.cost.HiveVolcanoPlanner; +import org.apache.hadoop.hive.ql.optimizer.optiq.reloperators.HiveAggregateRel; +import org.apache.hadoop.hive.ql.optimizer.optiq.reloperators.HiveFilterRel; +import org.apache.hadoop.hive.ql.optimizer.optiq.reloperators.HiveJoinRel; +import org.apache.hadoop.hive.ql.optimizer.optiq.reloperators.HiveProjectRel; +import org.apache.hadoop.hive.ql.optimizer.optiq.reloperators.HiveRel; +import org.apache.hadoop.hive.ql.optimizer.optiq.reloperators.HiveSortRel; +import org.apache.hadoop.hive.ql.optimizer.optiq.reloperators.HiveTableScanRel; +import org.apache.hadoop.hive.ql.optimizer.optiq.reloperators.HiveUnionRel; +import org.apache.hadoop.hive.ql.optimizer.optiq.rules.HivePartitionPrunerRule; +import org.apache.hadoop.hive.ql.optimizer.optiq.rules.HivePushFilterPastJoinRule; +import org.apache.hadoop.hive.ql.optimizer.optiq.translator.ASTConverter; +import org.apache.hadoop.hive.ql.optimizer.optiq.translator.RexNodeConverter; +import org.apache.hadoop.hive.ql.optimizer.optiq.translator.SqlFunctionConverter; +import org.apache.hadoop.hive.ql.optimizer.optiq.translator.TypeConverter; import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer.tableSpec.SpecType; import org.apache.hadoop.hive.ql.parse.PTFInvocationSpec.OrderExpression; import org.apache.hadoop.hive.ql.parse.PTFInvocationSpec.OrderSpec; @@ -195,6 +222,65 @@ import 
org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.mapred.InputFormat; +import org.eigenbase.rel.AggregateCall; +import org.eigenbase.rel.Aggregation; +import org.eigenbase.rel.FilterRelBase; +import org.eigenbase.rel.InvalidRelException; +import org.eigenbase.rel.JoinRelBase; +import org.eigenbase.rel.JoinRelType; +import org.eigenbase.rel.RelCollation; +import org.eigenbase.rel.RelCollationImpl; +import org.eigenbase.rel.RelFactories; +import org.eigenbase.rel.RelFieldCollation; +import org.eigenbase.rel.RelNode; +import org.eigenbase.rel.metadata.CachingRelMetadataProvider; +import org.eigenbase.rel.metadata.ChainedRelMetadataProvider; +import org.eigenbase.rel.metadata.RelMetadataProvider; +import org.eigenbase.rel.rules.ConvertMultiJoinRule; +import org.eigenbase.rel.rules.LoptOptimizeJoinRule; +import org.eigenbase.rel.rules.MergeFilterRule; +import org.eigenbase.rel.rules.PushFilterPastProjectRule; +import org.eigenbase.rel.rules.PushFilterPastSetOpRule; +import org.eigenbase.rel.rules.RemoveTrivialProjectRule; +import org.eigenbase.rel.rules.SemiJoinRel; +import org.eigenbase.rel.rules.TransitivePredicatesOnJoinRule; +import org.eigenbase.relopt.RelOptCluster; +import org.eigenbase.relopt.RelOptPlanner; +import org.eigenbase.relopt.RelOptQuery; +import org.eigenbase.relopt.RelOptRule; +import org.eigenbase.relopt.RelOptSchema; +import org.eigenbase.relopt.RelOptUtil; +import org.eigenbase.relopt.RelTraitSet; +import org.eigenbase.relopt.hep.HepMatchOrder; +import org.eigenbase.relopt.hep.HepPlanner; +import org.eigenbase.relopt.hep.HepProgram; +import org.eigenbase.relopt.hep.HepProgramBuilder; +import org.eigenbase.reltype.RelDataType; +import org.eigenbase.reltype.RelDataTypeFactory; +import org.eigenbase.reltype.RelDataTypeField; +import org.eigenbase.rex.RexBuilder; +import org.eigenbase.rex.RexInputRef; +import org.eigenbase.rex.RexNode; +import org.eigenbase.rex.RexWindowBound; +import org.eigenbase.rex.RexFieldCollation; +import org.eigenbase.sql.SqlAggFunction; +import org.eigenbase.sql.SqlWindow; +import org.eigenbase.sql.parser.SqlParserPos; +import org.eigenbase.sql.type.SqlTypeName; +import org.eigenbase.sql2rel.RelFieldTrimmer; +import org.eigenbase.sql.SqlCall; +import org.eigenbase.sql.SqlExplainLevel; +import org.eigenbase.sql.SqlKind; +import org.eigenbase.sql.SqlNode; +import org.eigenbase.sql.SqlLiteral; +import org.eigenbase.util.CompositeList; +import org.eigenbase.util.ImmutableIntList; + +import com.google.common.base.Function; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableList.Builder; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.Lists; /** * Implementation of the semantic analyzer. It generates the query plan. @@ -261,6 +347,9 @@ //flag for partial scan during analyze ... compute statistics protected boolean partialscan = false; + private volatile boolean runCBO = true; + private volatile boolean disableJoinMerge = false; + /* * Capture the CTE definitions in a Query. 
*/ @@ -275,6 +364,11 @@ int nextNum; } + protected SemanticAnalyzer(HiveConf conf, boolean runCBO) throws SemanticException { + this(conf); + this.runCBO = runCBO; + } + public SemanticAnalyzer(HiveConf conf) throws SemanticException { super(conf); @@ -309,8 +403,11 @@ public SemanticAnalyzer(HiveConf conf) throws SemanticException { } @Override - protected void reset() { - super.reset(); + protected void reset(boolean clearPartsCache) { + super.reset(true); + if(clearPartsCache) { + prunedPartitions.clear(); + } loadTableWork.clear(); loadFileWork.clear(); topOps.clear(); @@ -324,8 +421,28 @@ protected void reset() { smbMapJoinContext.clear(); opParseCtx.clear(); groupOpToInputTables.clear(); - prunedPartitions.clear(); + disableJoinMerge = false; aliasToCTEs.clear(); + topToTable.clear(); + opToPartPruner.clear(); + opToPartList.clear(); + opToPartToSkewedPruner.clear(); + opToSamplePruner.clear(); + nameToSplitSample.clear(); + fsopToTable.clear(); + resultSchema = null; + createVwDesc = null; + viewsExpanded = null; + viewSelect = null; + ctesExpanded = null; + globalLimitCtx.disableOpt(); + viewAliasToInput.clear(); + reduceSinkOperatorsAddedByEnforceBucketingSorting.clear(); + topToTableProps.clear(); + listMapJoinOpsNoReducer.clear(); + unparseTranslator.clear(); + queryProperties.clear(); + outputs.clear(); } public void initParseCtx(ParseContext pctx) { @@ -789,9 +906,7 @@ private void addCTEAsSubQuery(QB qb, String cteName, String cteAlias) throws Sem private boolean isJoinToken(ASTNode node) { if ((node.getToken().getType() == HiveParser.TOK_JOIN) || (node.getToken().getType() == HiveParser.TOK_CROSSJOIN) - || (node.getToken().getType() == HiveParser.TOK_LEFTOUTERJOIN) - || (node.getToken().getType() == HiveParser.TOK_RIGHTOUTERJOIN) - || (node.getToken().getType() == HiveParser.TOK_FULLOUTERJOIN) + || isOuterJoinToken(node) || (node.getToken().getType() == HiveParser.TOK_LEFTSEMIJOIN) || (node.getToken().getType() == HiveParser.TOK_UNIQUEJOIN)) { return true; @@ -800,6 +915,12 @@ private boolean isJoinToken(ASTNode node) { return false; } + private boolean isOuterJoinToken(ASTNode node) { + return (node.getToken().getType() == HiveParser.TOK_LEFTOUTERJOIN) + || (node.getToken().getType() == HiveParser.TOK_RIGHTOUTERJOIN) + || (node.getToken().getType() == HiveParser.TOK_FULLOUTERJOIN); + } + /** * Given the AST with TOK_JOIN as the root, get all the aliases for the tables * or subqueries in the join. 
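The reset(clearPartsCache) split above exists so the CBO path can re-run analysis on its optimized AST without re-fetching partition metadata from the metastore. A standalone sketch of that contract (names are illustrative, not Hive's):

    import java.util.HashMap;
    import java.util.Map;

    class AnalyzerStateSketch {
      final Map<String, Object> prunedPartitionsCache = new HashMap<String, Object>();
      final Map<String, Object> perQueryState = new HashMap<String, Object>();

      void reset(boolean clearPartsCache) {
        perQueryState.clear();            // always rebuilt per analysis pass
        if (clearPartsCache) {
          prunedPartitionsCache.clear();  // metastore round-trips are expensive;
        }                                 // keep the cache across CBO re-analysis
      }
    }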
@@ -817,6 +938,7 @@ private void processJoin(QB qb, ASTNode join) throws SemanticException { "Join with multiple children")); } + queryProperties.incrementJoinCount(isOuterJoinToken(join)); for (int num = 0; num < numChildren; num++) { ASTNode child = (ASTNode) join.getChild(num); if (child.getToken().getType() == HiveParser.TOK_TABREF) { @@ -923,10 +1045,15 @@ public boolean doPhase1(ASTNode ast, QB qb, Phase1Ctx ctx_1) qb.countSel(); qbp.setSelExprForClause(ctx_1.dest, ast); + int posn = 0; if (((ASTNode) ast.getChild(0)).getToken().getType() == HiveParser.TOK_HINTLIST) { qbp.setHints((ASTNode) ast.getChild(0)); + posn++; } + if ((ast.getChild(posn).getChild(0).getType() == HiveParser.TOK_TRANSFORM)) + queryProperties.setUsesScript(true); + LinkedHashMap aggregations = doPhase1GetAggregationsFromSelect(ast, qb, ctx_1.dest); doPhase1GetColumnAliasesFromSelect(ast, qbp); @@ -937,6 +1064,8 @@ public boolean doPhase1(ASTNode ast, QB qb, Phase1Ctx ctx_1) case HiveParser.TOK_WHERE: qbp.setWhrExprForClause(ctx_1.dest, ast); + if (!SubQueryUtils.findSubQueries((ASTNode) ast.getChild(0)).isEmpty()) + queryProperties.setFilterWithSubQuery(true); break; case HiveParser.TOK_INSERT_INTO: @@ -959,6 +1088,9 @@ public boolean doPhase1(ASTNode ast, QB qb, Phase1Ctx ctx_1) } } qbp.setDestForClause(ctx_1.dest, (ASTNode) ast.getChild(0)); + + if (qbp.getClauseNamesForDest().size() > 1) + queryProperties.setMultiDestQuery(true); break; case HiveParser.TOK_FROM: @@ -980,7 +1112,6 @@ public boolean doPhase1(ASTNode ast, QB qb, Phase1Ctx ctx_1) frm.getToken().getType() == HiveParser.TOK_LATERAL_VIEW_OUTER) { processLateralView(qb, frm); } else if (isJoinToken(frm)) { - queryProperties.setHasJoin(true); processJoin(qb, frm); qbp.setJoinExpr(frm); }else if(frm.getToken().getType() == HiveParser.TOK_PTBLFUNCTION){ @@ -1197,6 +1328,10 @@ private void getMetaData(QBExpr qbexpr, ReadEntity parentInput) } } + public Table getTable(TableScanOperator ts) { + return topToTable.get(ts); + } + public void getMetaData(QB qb) throws SemanticException { getMetaData(qb, null); } @@ -9169,7 +9304,9 @@ public Operator genPlan(QB qb) throws SemanticException { aliasToOpInfo ); } } - mergeJoinTree(qb); + + if (!disableJoinMerge) + mergeJoinTree(qb); } // if any filters are present in the join tree, push them on top of the @@ -9377,9 +9514,9 @@ public Phase1Ctx initPhase1Ctx() { } @Override - public void init() { + public void init(boolean clearPartsCache) { // clear most members - reset(); + reset(clearPartsCache); // init QB qb = new QB(null, null, false); @@ -9434,11 +9571,78 @@ public void analyzeInternal(ASTNode ast) throws SemanticException { getMetaData(qb); LOG.info("Completed getting MetaData in Semantic Analysis"); + + if (runCBO) { + boolean tokenTypeIsQuery = ast.getToken().getType() == HiveParser.TOK_QUERY + || ast.getToken().getType() == HiveParser.TOK_EXPLAIN; + if (!tokenTypeIsQuery || createVwDesc != null + || !HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_CBO_ENABLED) + || !canHandleQuery()) { + runCBO = false; + } + + if (runCBO) { + disableJoinMerge = true; + } + } + // Save the result schema derived from the sink operator produced // by genPlan. This has the correct column names, which clients // such as JDBC would prefer instead of the c0, c1 we'll end // up with later. - Operator sinkOp = genPlan(qb); + Operator sinkOp = null; + + if (runCBO) { + OptiqBasedPlanner optiqPlanner = new OptiqBasedPlanner(); + boolean reAnalyzeAST = false; + + try { + // 1. 
Gen Optimized AST + ASTNode newAST = optiqPlanner.getOptimizedAST(prunedPartitions); + + // 2. Regen OP plan from optimized AST + init(false); + ctx_1 = initPhase1Ctx(); + if (!doPhase1(newAST, qb, ctx_1)) { + throw new RuntimeException( + "Couldn't do phase1 on CBO optimized query plan"); + } + // unfortunately making prunedPartitions immutable is not possible here + // with SemiJoins not all tables are costed in CBO, + // so their PartitionList is not evaluated until the run phase. + //prunedPartitions = ImmutableMap.copyOf(prunedPartitions); + getMetaData(qb); + + disableJoinMerge = true; + sinkOp = genPlan(qb); + LOG.info("CBO Succeeded; optimized logical plan."); + LOG.debug(newAST.dump()); + + /* + * Use non CBO Result Set Schema so as to preserve user specified names. + * Hive seems to have bugs with OB/LIMIT in sub queries. // 3. Reset + * result set schema resultSchema = + * convertRowSchemaToResultSetSchema(opParseCtx.get(sinkOp) + * .getRowResolver(), true); + */ + } catch (Exception e) { + LOG.error("CBO failed, skipping CBO. ", e); + if (!conf.getBoolVar(ConfVars.HIVE_IN_TEST) || (optiqPlanner.noColsMissingStats.get() > 0)) { + reAnalyzeAST = true; + } + } finally { + runCBO = false; + disableJoinMerge = false; + if (reAnalyzeAST) { + init(true); + prunedPartitions.clear(); + analyzeInternal(ast); + return; + } + } + } else { + sinkOp = genPlan(qb); + } if (createVwDesc != null) resultSchema = convertRowSchemaToViewSchema(opParseCtx.get(sinkOp).getRowResolver()); @@ -11633,4 +11837,1768 @@ private void addAlternateGByKeyMappings(ASTNode gByExpr, ColumnInfo colInfo, else return (ltd.getReplace() ? WriteEntity.WriteType.INSERT_OVERWRITE : WriteEntity.WriteType.INSERT); } + + /**** Temporary Place Holder For Optiq plan Gen, Optimizer ****/ + + /* + * Entry point to Optimizations using Optiq. + */ + + // TODO: Extend QP to indicate LV, Multi Insert, Cubes, Rollups... + private boolean canHandleQuery() { + boolean runOptiqPlanner = false; + // Assumption: If top level QB is query then everything below it must also + // be Query + if (qb.getIsQuery() + && ((queryProperties.getJoinCount() > 1) || conf.getBoolVar(ConfVars.HIVE_IN_TEST)) + && !queryProperties.hasClusterBy() && !queryProperties.hasDistributeBy() + && !queryProperties.hasSortBy() && !queryProperties.hasPTF() + && !queryProperties.usesScript() && !queryProperties.hasMultiDestQuery()) { + runOptiqPlanner = true; + } else { + LOG.info("Can not invoke CBO; query contains operators not supported for CBO."); + } + + return runOptiqPlanner; + } + + private class OptiqBasedPlanner implements Frameworks.PlannerAction { + RelOptCluster m_cluster; + RelOptSchema m_relOptSchema; + SemanticException m_semanticException; + Map partitionCache; + AtomicInteger noColsMissingStats = new AtomicInteger(0); + + // TODO: Do we need to keep track of RR, ColNameToPosMap for every op or + // just last one. 
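For orientation, a standalone illustration (made-up columns) of the bookkeeping the two maps declared next provide: every translated RelNode is annotated with its Hive RowResolver and with the Optiq field position of each Hive column name, so expression translation can emit positional input refs:

    import java.util.HashMap;
    import java.util.Map;

    public class ColNamePosSketch {
      public static void main(String[] args) {
        // hypothetical scan over a table with columns (key, value)
        Map<String, Integer> hiveNameToOptiqPos = new HashMap<String, Integer>();
        hiveNameToOptiqPos.put("key", 0);
        hiveNameToOptiqPos.put("value", 1);
        // a predicate like 'value > 10' is rewritten against position $1
        System.out.println("value -> $" + hiveNameToOptiqPos.get("value"));
      }
    }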
+ LinkedHashMap m_relToHiveRR = new LinkedHashMap(); + LinkedHashMap> m_relToHiveColNameOptiqPosMap = new LinkedHashMap>(); + + private ASTNode getOptimizedAST(Map partitionCache) + throws SemanticException { + ASTNode optiqOptimizedAST = null; + RelNode optimizedOptiqPlan = null; + this.partitionCache = partitionCache; + + try { + optimizedOptiqPlan = Frameworks.withPlanner(this); + } catch (Exception e) { + if (m_semanticException != null) + throw m_semanticException; + else + throw new RuntimeException(e); + } + optiqOptimizedAST = ASTConverter.convert(optimizedOptiqPlan, resultSchema); + + return optiqOptimizedAST; + } + + @Override + public RelNode apply(RelOptCluster cluster, RelOptSchema relOptSchema, SchemaPlus rootSchema) { + RelNode optiqGenPlan = null; + RelNode optiqPreCboPlan = null; + RelNode optiqOptimizedPlan = null; + /* + * recreate cluster, so that it picks up the additional traitDef + */ + RelOptPlanner planner = HiveVolcanoPlanner.createPlanner(); + final RelOptQuery query = new RelOptQuery(planner); + final RexBuilder rexBuilder = cluster.getRexBuilder(); + cluster = query.createCluster(rexBuilder.getTypeFactory(), rexBuilder); + + m_cluster = cluster; + m_relOptSchema = relOptSchema; + + try { + optiqGenPlan = genLogicalPlan(qb); + } catch (SemanticException e) { + m_semanticException = e; + throw new RuntimeException(e); + } + + optiqPreCboPlan = applyPreCBOTransforms(optiqGenPlan, HiveDefaultRelMetadataProvider.INSTANCE); + List list = Lists.newArrayList(); + list.add(HiveDefaultRelMetadataProvider.INSTANCE); + RelTraitSet desiredTraits = cluster.traitSetOf(HiveRel.CONVENTION, RelCollationImpl.EMPTY); + + HepProgram hepPgm = null; + HepProgramBuilder hepPgmBldr = new HepProgramBuilder().addMatchOrder(HepMatchOrder.BOTTOM_UP) + .addRuleInstance(new ConvertMultiJoinRule(HiveJoinRel.class)); + hepPgmBldr.addRuleInstance(new LoptOptimizeJoinRule(HiveJoinRel.HIVE_JOIN_FACTORY, + HiveProjectRel.DEFAULT_PROJECT_FACTORY, HiveFilterRel.DEFAULT_FILTER_FACTORY)); + + hepPgm = hepPgmBldr.build(); + HepPlanner hepPlanner = new HepPlanner(hepPgm); + + hepPlanner.registerMetadataProviders(list); + RelMetadataProvider chainedProvider = ChainedRelMetadataProvider.of(list); + cluster.setMetadataProvider(new CachingRelMetadataProvider(chainedProvider, hepPlanner)); + + RelNode rootRel = optiqPreCboPlan; + hepPlanner.setRoot(rootRel); + if (!optiqPreCboPlan.getTraitSet().equals(desiredTraits)) { + rootRel = hepPlanner.changeTraits(optiqPreCboPlan, desiredTraits); + } + hepPlanner.setRoot(rootRel); + + optiqOptimizedPlan = hepPlanner.findBestExp(); + + if (LOG.isDebugEnabled() && !conf.getBoolVar(ConfVars.HIVE_IN_TEST)) { + LOG.debug("CBO Planning details:\n"); + LOG.debug("Original Plan:\n"); + LOG.debug(RelOptUtil.toString(optiqGenPlan, SqlExplainLevel.ALL_ATTRIBUTES)); + LOG.debug("Plan After PPD, PartPruning, ColumnPruning:\n"); + LOG.debug(RelOptUtil.toString(optiqPreCboPlan, SqlExplainLevel.ALL_ATTRIBUTES)); + LOG.debug("Plan After Join Reordering:\n"); + LOG.debug(RelOptUtil.toString(optiqOptimizedPlan, SqlExplainLevel.ALL_ATTRIBUTES)); + } + + return optiqOptimizedPlan; + } + + public RelNode applyPreCBOTransforms(RelNode basePlan, RelMetadataProvider mdProvider) { + + // TODO: Decorelation of subquery should be done before attempting + // Partition Pruning; otherwise Expression evaluation may try to execute + // corelated sub query. 
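The apply() method above drives the same Hep pattern several times: build a program of rules, seed the planner with the current plan, and take the expression it converges to. Condensed to its skeleton (the rule argument is illustrative):

    import org.eigenbase.rel.RelNode;
    import org.eigenbase.relopt.RelOptRule;
    import org.eigenbase.relopt.hep.HepMatchOrder;
    import org.eigenbase.relopt.hep.HepPlanner;
    import org.eigenbase.relopt.hep.HepProgramBuilder;

    public class HepDriverSketch {
      /** Applies one rule bottom-up until the plan stops changing. */
      static RelNode run(RelNode plan, RelOptRule rule) {
        HepPlanner hep = new HepPlanner(new HepProgramBuilder()
            .addMatchOrder(HepMatchOrder.BOTTOM_UP)
            .addRuleInstance(rule)
            .build());
        hep.setRoot(plan);
        return hep.findBestExp();
      }
    }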
+ basePlan = hepPlan(basePlan, true, mdProvider, new PushFilterPastProjectRule( + FilterRelBase.class, HiveFilterRel.DEFAULT_FILTER_FACTORY, HiveProjectRel.class, + HiveProjectRel.DEFAULT_PROJECT_FACTORY), new PushFilterPastSetOpRule( + HiveFilterRel.DEFAULT_FILTER_FACTORY), new MergeFilterRule( + HiveFilterRel.DEFAULT_FILTER_FACTORY), HivePushFilterPastJoinRule.JOIN, + HivePushFilterPastJoinRule.FILTER_ON_JOIN); + + basePlan = hepPlan(basePlan, false, mdProvider, new TransitivePredicatesOnJoinRule( + JoinRelBase.class, HiveFilterRel.DEFAULT_FILTER_FACTORY), + RemoveTrivialProjectRule.INSTANCE, + new HivePartitionPrunerRule(SemanticAnalyzer.this.conf)); + + RelFieldTrimmer fieldTrimmer = new RelFieldTrimmer(null, HiveProjectRel.DEFAULT_PROJECT_FACTORY, + HiveFilterRel.DEFAULT_FILTER_FACTORY, HiveJoinRel.HIVE_JOIN_FACTORY, RelFactories.DEFAULT_SEMI_JOIN_FACTORY, + HiveSortRel.HIVE_SORT_REL_FACTORY, HiveAggregateRel.HIVE_AGGR_REL_FACTORY, HiveUnionRel.UNION_REL_FACTORY); + basePlan = fieldTrimmer.trim(basePlan); + + return basePlan; + } + + private RelNode hepPlan(RelNode basePlan, boolean followPlanChanges, + RelMetadataProvider mdProvider, RelOptRule... rules) { + + RelNode optimizedRelNode = basePlan; + HepProgramBuilder programBuilder = new HepProgramBuilder(); + if (followPlanChanges) { + programBuilder.addMatchOrder(HepMatchOrder.TOP_DOWN); + programBuilder = programBuilder.addRuleCollection(ImmutableList.copyOf(rules)); + } else { + // TODO: Should this be also TOP_DOWN? + for (RelOptRule r : rules) + programBuilder.addRuleInstance(r); + } + + HepPlanner planner = new HepPlanner(programBuilder.build()); + List list = Lists.newArrayList(); + list.add(mdProvider); + planner.registerMetadataProviders(list); + RelMetadataProvider chainedProvider = ChainedRelMetadataProvider.of(list); + basePlan.getCluster().setMetadataProvider( + new CachingRelMetadataProvider(chainedProvider, planner)); + + planner.setRoot(basePlan); + optimizedRelNode = planner.findBestExp(); + + return optimizedRelNode; + } + + @SuppressWarnings("nls") + private RelNode genUnionLogicalPlan(String unionalias, String leftalias, RelNode leftRel, + String rightalias, RelNode rightRel) throws SemanticException { + HiveUnionRel unionRel = null; + + // 1. Get Row Resolvers, Column map for original left and right input of + // Union Rel + RowResolver leftRR = this.m_relToHiveRR.get(leftRel); + RowResolver rightRR = this.m_relToHiveRR.get(rightRel); + HashMap leftmap = leftRR.getFieldMap(leftalias); + HashMap rightmap = rightRR.getFieldMap(rightalias); + + // 2. Validate that Union is feasible according to Hive (by using type + // info from RR) + if (leftmap.size() != rightmap.size()) { + throw new SemanticException("Schema of both sides of union should match."); + } + + ASTNode tabref = qb.getAliases().isEmpty() ? null : qb.getParseInfo().getSrcForAlias( + qb.getAliases().get(0)); + for (Map.Entry lEntry : leftmap.entrySet()) { + String field = lEntry.getKey(); + ColumnInfo lInfo = lEntry.getValue(); + ColumnInfo rInfo = rightmap.get(field); + if (rInfo == null) { + throw new SemanticException(generateErrorMessage(tabref, + "Schema of both sides of union should match. " + rightalias + + " does not have the field " + field)); + } + if (lInfo == null) { + throw new SemanticException(generateErrorMessage(tabref, + "Schema of both sides of union should match. 
" + leftalias + + " does not have the field " + field)); + } + if (!lInfo.getInternalName().equals(rInfo.getInternalName())) { + throw new SemanticException(generateErrorMessage(tabref, + "Schema of both sides of union should match: field " + field + ":" + + " appears on the left side of the UNION at column position: " + + getPositionFromInternalName(lInfo.getInternalName()) + + ", and on the right side of the UNION at column position: " + + getPositionFromInternalName(rInfo.getInternalName()) + + ". Column positions should match for a UNION")); + } + // try widening coversion, otherwise fail union + TypeInfo commonTypeInfo = FunctionRegistry.getCommonClassForUnionAll(lInfo.getType(), + rInfo.getType()); + if (commonTypeInfo == null) { + throw new SemanticException(generateErrorMessage(tabref, + "Schema of both sides of union should match: Column " + field + " is of type " + + lInfo.getType().getTypeName() + " on first table and type " + + rInfo.getType().getTypeName() + " on second table")); + } + } + + // 3. construct Union Output RR using original left & right Input + RowResolver unionoutRR = new RowResolver(); + for (Map.Entry lEntry : leftmap.entrySet()) { + String field = lEntry.getKey(); + ColumnInfo lInfo = lEntry.getValue(); + ColumnInfo rInfo = rightmap.get(field); + ColumnInfo unionColInfo = new ColumnInfo(lInfo); + unionColInfo.setTabAlias(unionalias); + unionColInfo.setType(FunctionRegistry.getCommonClassForUnionAll(lInfo.getType(), + rInfo.getType())); + unionoutRR.put(unionalias, field, unionColInfo); + } + + // 4. Determine which columns requires cast on left/right input (Optiq + // requires exact types on both sides of union) + boolean leftNeedsTypeCast = false; + boolean rightNeedsTypeCast = false; + List leftProjs = new ArrayList(); + List rightProjs = new ArrayList(); + List leftRowDT = leftRel.getRowType().getFieldList(); + List rightRowDT = rightRel.getRowType().getFieldList(); + + RelDataType leftFieldDT; + RelDataType rightFieldDT; + RelDataType unionFieldDT; + List tmpDTLst = new ArrayList(); + for (int i = 0; i < leftRowDT.size(); i++) { + leftFieldDT = leftRowDT.get(i).getType(); + rightFieldDT = rightRowDT.get(i).getType(); + if (!leftFieldDT.equals(rightFieldDT)) { + tmpDTLst.clear(); + tmpDTLst.add(leftFieldDT); + tmpDTLst.add(rightFieldDT); + unionFieldDT = m_cluster.getTypeFactory().leastRestrictive(tmpDTLst); + + if (!unionFieldDT.equals(leftFieldDT)) + leftNeedsTypeCast = true; + leftProjs.add(m_cluster.getRexBuilder().ensureType(unionFieldDT, + m_cluster.getRexBuilder().makeInputRef(leftFieldDT, i), true)); + + if (!unionFieldDT.equals(rightFieldDT)) + rightNeedsTypeCast = true; + rightProjs.add(m_cluster.getRexBuilder().ensureType(unionFieldDT, + m_cluster.getRexBuilder().makeInputRef(rightFieldDT, i), true)); + } else { + leftProjs.add(m_cluster.getRexBuilder().ensureType(leftFieldDT, + m_cluster.getRexBuilder().makeInputRef(leftFieldDT, i), true)); + rightProjs.add(m_cluster.getRexBuilder().ensureType(rightFieldDT, + m_cluster.getRexBuilder().makeInputRef(rightFieldDT, i), true)); + } + } + + // 5. Introduce Project Rel above original left/right inputs if cast is + // needed for type parity + RelNode unionLeftInput = leftRel; + RelNode unionRightInput = rightRel; + if (leftNeedsTypeCast) { + unionLeftInput = HiveProjectRel.create(leftRel, leftProjs, leftRel.getRowType() + .getFieldNames()); + } + if (rightNeedsTypeCast) { + unionRightInput = HiveProjectRel.create(rightRel, rightProjs, rightRel.getRowType() + .getFieldNames()); + } + + // 6. 
Construct Union Rel + ImmutableList.Builder bldr = new ImmutableList.Builder(); + bldr.add(unionLeftInput); + bldr.add(unionRightInput); + unionRel = new HiveUnionRel(m_cluster, TraitsUtil.getUnionTraitSet(m_cluster, null), + bldr.build()); + + m_relToHiveRR.put(unionRel, unionoutRR); + m_relToHiveColNameOptiqPosMap.put(unionRel, + this.buildHiveToOptiqColumnMap(unionoutRR, unionRel)); + + return unionRel; + } + + private RelNode genJoinRelNode(RelNode leftRel, RelNode rightRel, JoinType hiveJoinType, + ASTNode joinCond) throws SemanticException { + RelNode joinRel = null; + + // 1. construct the RowResolver for the new Join Node by combining row + // resolvers from left, right + RowResolver leftRR = this.m_relToHiveRR.get(leftRel); + RowResolver rightRR = this.m_relToHiveRR.get(rightRel); + RowResolver joinRR = null; + + if (hiveJoinType != JoinType.LEFTSEMI) { + joinRR = RowResolver.getCombinedRR(leftRR, rightRR); + } else { + joinRR = new RowResolver(); + RowResolver.add(joinRR, leftRR, 0); + } + + // 2. Construct ExpressionNodeDesc representing Join Condition + RexNode optiqJoinCond = null; + if (joinCond != null) { + Map exprNodes = JoinCondnTypeCheckProcFactory.genExprNode(joinCond, + new JoinTypeCheckCtx(leftRR, rightRR)); + + ExprNodeDesc joinCondnExprNode = exprNodes.get(joinCond); + + List inputRels = new ArrayList(); + inputRels.add(leftRel); + inputRels.add(rightRel); + optiqJoinCond = RexNodeConverter.convert(m_cluster, joinCondnExprNode, inputRels, + m_relToHiveRR, m_relToHiveColNameOptiqPosMap, false); + } else { + optiqJoinCond = m_cluster.getRexBuilder().makeLiteral(true); + } + + // 3. Validate that join condition is legal (i.e no function refering to + // both sides of join, only equi join) + // TODO: Join filter handling (only supported for OJ by runtime or is it + // supported for IJ as well) + + // 4. Construct Join Rel Node + boolean leftSemiJoin = false; + JoinRelType optiqJoinType; + switch (hiveJoinType) { + case LEFTOUTER: + optiqJoinType = JoinRelType.LEFT; + break; + case RIGHTOUTER: + optiqJoinType = JoinRelType.RIGHT; + break; + case FULLOUTER: + optiqJoinType = JoinRelType.FULL; + break; + case LEFTSEMI: + optiqJoinType = JoinRelType.INNER; + leftSemiJoin = true; + break; + case INNER: + default: + optiqJoinType = JoinRelType.INNER; + break; + } + + if (leftSemiJoin) { + List sysFieldList = new ArrayList(); + List leftJoinKeys = new ArrayList(); + List rightJoinKeys = new ArrayList(); + + RexNode nonEquiConds = RelOptUtil.splitJoinCondition(sysFieldList, leftRel, rightRel, + optiqJoinCond, leftJoinKeys, rightJoinKeys, null, null); + + if (!nonEquiConds.isAlwaysTrue()) { + throw new SemanticException("Non equality condition not supported in Semi-Join" + + nonEquiConds); + } + + RelNode[] inputRels = new RelNode[] { leftRel, rightRel }; + final List leftKeys = new ArrayList(); + final List rightKeys = new ArrayList(); + optiqJoinCond = HiveOptiqUtil.projectNonColumnEquiConditions( + HiveProjectRel.DEFAULT_PROJECT_FACTORY, inputRels, leftJoinKeys, rightJoinKeys, 0, + leftKeys, rightKeys); + + joinRel = new SemiJoinRel(m_cluster, m_cluster.traitSetOf(HiveRel.CONVENTION), + inputRels[0], inputRels[1], optiqJoinCond, ImmutableIntList.copyOf(leftKeys), + ImmutableIntList.copyOf(rightKeys)); + } else { + joinRel = HiveJoinRel.getJoin(m_cluster, leftRel, rightRel, optiqJoinCond, optiqJoinType, + leftSemiJoin); + } + // 5. 
Add new JoinRel & its RR to the maps
+      m_relToHiveColNameOptiqPosMap.put(joinRel, this.buildHiveToOptiqColumnMap(joinRR, joinRel));
+      m_relToHiveRR.put(joinRel, joinRR);
+
+      return joinRel;
+    }
+
+    /**
+     * Generate Join Logical Plan RelNode by walking through the join AST.
+     *
+     * @param joinParseTree
+     *          join AST subtree (TOK_JOIN and friends)
+     * @param aliasToRel
+     *          Alias(Table/Relation alias) to RelNode; only read and not
+     *          written in to by this method
+     * @return
+     * @throws SemanticException
+     */
+    private RelNode genJoinLogicalPlan(ASTNode joinParseTree, Map<String, RelNode> aliasToRel)
+        throws SemanticException {
+      RelNode leftRel = null;
+      RelNode rightRel = null;
+      JoinType hiveJoinType = null;
+
+      if (joinParseTree.getToken().getType() == HiveParser.TOK_UNIQUEJOIN) {
+        throw new RuntimeException("CBO does not support Unique Join");
+      }
+
+      // 1. Determine Join Type
+      // TODO: What about TOK_CROSSJOIN, TOK_MAPJOIN
+      switch (joinParseTree.getToken().getType()) {
+      case HiveParser.TOK_LEFTOUTERJOIN:
+        hiveJoinType = JoinType.LEFTOUTER;
+        break;
+      case HiveParser.TOK_RIGHTOUTERJOIN:
+        hiveJoinType = JoinType.RIGHTOUTER;
+        break;
+      case HiveParser.TOK_FULLOUTERJOIN:
+        hiveJoinType = JoinType.FULLOUTER;
+        break;
+      case HiveParser.TOK_LEFTSEMIJOIN:
+        hiveJoinType = JoinType.LEFTSEMI;
+        break;
+      default:
+        hiveJoinType = JoinType.INNER;
+        break;
+      }
+
+      // 2. Get Left Table Alias
+      ASTNode left = (ASTNode) joinParseTree.getChild(0);
+      if ((left.getToken().getType() == HiveParser.TOK_TABREF)
+          || (left.getToken().getType() == HiveParser.TOK_SUBQUERY)
+          || (left.getToken().getType() == HiveParser.TOK_PTBLFUNCTION)) {
+        String tableName = getUnescapedUnqualifiedTableName((ASTNode) left.getChild(0))
+            .toLowerCase();
+        String leftTableAlias = left.getChildCount() == 1 ? tableName
+            : unescapeIdentifier(left.getChild(left.getChildCount() - 1).getText().toLowerCase());
+        // ptf node form is: ^(TOK_PTBLFUNCTION $name $alias?
+        // partitionTableFunctionSource partitioningSpec? expression*)
+        // guaranteed to have an alias here: check done in processJoin
+        leftTableAlias = (left.getToken().getType() == HiveParser.TOK_PTBLFUNCTION)
+            ? unescapeIdentifier(left.getChild(1).getText().toLowerCase())
+            : leftTableAlias;
+        leftRel = aliasToRel.get(leftTableAlias);
+      } else if (isJoinToken(left)) {
+        leftRel = genJoinLogicalPlan(left, aliasToRel);
+      } else {
+        assert (false);
+      }
+
+      // 3. Get Right Table Alias
+      ASTNode right = (ASTNode) joinParseTree.getChild(1);
+      if ((right.getToken().getType() == HiveParser.TOK_TABREF)
+          || (right.getToken().getType() == HiveParser.TOK_SUBQUERY)
+          || (right.getToken().getType() == HiveParser.TOK_PTBLFUNCTION)) {
+        String tableName = getUnescapedUnqualifiedTableName((ASTNode) right.getChild(0))
+            .toLowerCase();
+        String rightTableAlias = right.getChildCount() == 1 ? tableName
+            : unescapeIdentifier(right.getChild(right.getChildCount() - 1).getText().toLowerCase());
+        // ptf node form is: ^(TOK_PTBLFUNCTION $name $alias?
+        // partitionTableFunctionSource partitioningSpec? expression*)
+        // guaranteed to have an alias here: check done in processJoin
+        rightTableAlias = (right.getToken().getType() == HiveParser.TOK_PTBLFUNCTION)
+            ? unescapeIdentifier(right.getChild(1).getText().toLowerCase())
+            : rightTableAlias;
+        rightRel = aliasToRel.get(rightTableAlias);
+      } else {
+        assert (false);
+      }
+
+      // 4. Get Join Condition
+      ASTNode joinCond = (ASTNode) joinParseTree.getChild(2);
+
+      // 5.
Create Join rel + return genJoinRelNode(leftRel, rightRel, hiveJoinType, joinCond); + } + + private RelNode genTableLogicalPlan(String tableAlias, QB qb) { + RowResolver rr = new RowResolver(); + HiveTableScanRel tableRel = null; + + try { + // 1. Get Table Alias + String alias_id = getAliasId(tableAlias, qb); + + // 2. Get Table Metadata + Table tab = qb.getMetaData().getSrcForAlias(tableAlias); + + // 3. Get Table Logical Schema (Row Type) + // NOTE: Table logical schema = Non Partition Cols + Partition Cols + + // Virtual Cols + + // 3.1 Add Column info for non partion cols (Object Inspector fields) + StructObjectInspector rowObjectInspector = (StructObjectInspector) tab.getDeserializer() + .getObjectInspector(); + List fields = rowObjectInspector.getAllStructFieldRefs(); + ColumnInfo colInfo; + String colName; + ArrayList cInfoLst = new ArrayList(); + for (int i = 0; i < fields.size(); i++) { + colName = fields.get(i).getFieldName(); + colInfo = new ColumnInfo( + fields.get(i).getFieldName(), + TypeInfoUtils.getTypeInfoFromObjectInspector(fields.get(i).getFieldObjectInspector()), + tableAlias, false); + colInfo.setSkewedCol((isSkewedCol(tableAlias, qb, colName)) ? true : false); + rr.put(tableAlias, colName, colInfo); + cInfoLst.add(colInfo); + } + // TODO: Fix this + ArrayList nonPartitionColumns = new ArrayList(cInfoLst); + ArrayList partitionColumns = new ArrayList(); + + // 3.2 Add column info corresponding to partition columns + for (FieldSchema part_col : tab.getPartCols()) { + colName = part_col.getName(); + colInfo = new ColumnInfo(colName, + TypeInfoFactory.getPrimitiveTypeInfo(part_col.getType()), tableAlias, true); + rr.put(tableAlias, colName, colInfo); + cInfoLst.add(colInfo); + partitionColumns.add(colInfo); + } + + // 3.3 Add column info corresponding to virtual columns + Iterator vcs = VirtualColumn.getRegistry(conf).iterator(); + while (vcs.hasNext()) { + VirtualColumn vc = vcs.next(); + colInfo = new ColumnInfo(vc.getName(), vc.getTypeInfo(), tableAlias, true, + vc.getIsHidden()); + rr.put(tableAlias, vc.getName(), colInfo); + cInfoLst.add(colInfo); + } + + // 3.4 Build row type from field + RelDataType rowType = TypeConverter.getType(m_cluster, rr, null); + + // 4. Build RelOptAbstractTable + RelOptHiveTable optTable = new RelOptHiveTable(m_relOptSchema, tableAlias, rowType, tab, + nonPartitionColumns, partitionColumns, conf, partitionCache, noColsMissingStats); + + // 5. Build Hive Table Scan Rel + tableRel = new HiveTableScanRel(m_cluster, m_cluster.traitSetOf(HiveRel.CONVENTION), + optTable, rowType); + + // 6. 
Add Schema(RR) to RelNode-Schema map + ImmutableMap hiveToOptiqColMap = buildHiveToOptiqColumnMap(rr, tableRel); + m_relToHiveRR.put(tableRel, rr); + m_relToHiveColNameOptiqPosMap.put(tableRel, hiveToOptiqColMap); + } catch (Exception e) { + throw (new RuntimeException(e)); + } + + return tableRel; + } + + private RelNode genFilterRelNode(ASTNode filterExpr, RelNode srcRel) throws SemanticException { + ExprNodeDesc filterCondn = genExprNodeDesc(filterExpr, m_relToHiveRR.get(srcRel)); + ImmutableMap hiveColNameOptiqPosMap = this.m_relToHiveColNameOptiqPosMap + .get(srcRel); + RexNode convertedFilterExpr = new RexNodeConverter(m_cluster, srcRel.getRowType(), + hiveColNameOptiqPosMap, 0, true).convert(filterCondn); + RelNode filterRel = new HiveFilterRel(m_cluster, m_cluster.traitSetOf(HiveRel.CONVENTION), + srcRel, convertedFilterExpr); + this.m_relToHiveColNameOptiqPosMap.put(filterRel, hiveColNameOptiqPosMap); + m_relToHiveRR.put(filterRel, m_relToHiveRR.get(srcRel)); + m_relToHiveColNameOptiqPosMap.put(filterRel, hiveColNameOptiqPosMap); + + return filterRel; + } + + private RelNode genFilterRelNode(QB qb, ASTNode searchCond, RelNode srcRel, + Map aliasToRel, boolean forHavingClause) throws SemanticException { + /* + * Handle Subquery predicates. + * + * Notes (8/22/14 hb): Why is this a copy of the code from {@link + * #genFilterPlan} - for now we will support the same behavior as non CBO + * route. - but plan to allow nested SubQueries(Restriction.9.m) and + * multiple SubQuery expressions(Restriction.8.m). This requires use to + * utilize Optiq's Decorrelation mechanics, and for Optiq to fix/flush out + * Null semantics(OPTIQ-373) - besides only the driving code has been + * copied. Most of the code which is SubQueryUtils and QBSubQuery is + * reused. + */ + int numSrcColumns = srcRel.getRowType().getFieldCount(); + List subQueriesInOriginalTree = SubQueryUtils.findSubQueries(searchCond); + if (subQueriesInOriginalTree.size() > 0) { + + /* + * Restriction.9.m :: disallow nested SubQuery expressions. + */ + if (qb.getSubQueryPredicateDef() != null) { + throw new SemanticException(ErrorMsg.UNSUPPORTED_SUBQUERY_EXPRESSION.getMsg( + subQueriesInOriginalTree.get(0), "Nested SubQuery expressions are not supported.")); + } + + /* + * Restriction.8.m :: We allow only 1 SubQuery expression per Query. + */ + if (subQueriesInOriginalTree.size() > 1) { + + throw new SemanticException(ErrorMsg.UNSUPPORTED_SUBQUERY_EXPRESSION.getMsg( + subQueriesInOriginalTree.get(1), "Only 1 SubQuery expression is supported.")); + } + + /* + * Clone the Search AST; apply all rewrites on the clone. 
+ */ + ASTNode clonedSearchCond = (ASTNode) SubQueryUtils.adaptor.dupTree(searchCond); + List subQueries = SubQueryUtils.findSubQueries(clonedSearchCond); + + RowResolver inputRR = m_relToHiveRR.get(srcRel); + + for (int i = 0; i < subQueries.size(); i++) { + ASTNode subQueryAST = subQueries.get(i); + ASTNode originalSubQueryAST = subQueriesInOriginalTree.get(i); + + int sqIdx = qb.incrNumSubQueryPredicates(); + clonedSearchCond = SubQueryUtils.rewriteParentQueryWhere(clonedSearchCond, subQueryAST); + + QBSubQuery subQuery = SubQueryUtils.buildSubQuery(qb.getId(), sqIdx, subQueryAST, + originalSubQueryAST, ctx); + + if (!forHavingClause) { + qb.setWhereClauseSubQueryPredicate(subQuery); + } else { + qb.setHavingClauseSubQueryPredicate(subQuery); + } + String havingInputAlias = null; + + if (forHavingClause) { + havingInputAlias = "gby_sq" + sqIdx; + aliasToRel.put(havingInputAlias, srcRel); + } + + subQuery.validateAndRewriteAST(inputRR, forHavingClause, havingInputAlias, + aliasToRel.keySet()); + + QB qbSQ = new QB(subQuery.getOuterQueryId(), subQuery.getAlias(), true); + qbSQ.setSubQueryDef(subQuery.getSubQuery()); + Phase1Ctx ctx_1 = initPhase1Ctx(); + doPhase1(subQuery.getSubQueryAST(), qbSQ, ctx_1); + getMetaData(qbSQ); + RelNode subQueryRelNode = genLogicalPlan(qbSQ); + aliasToRel.put(subQuery.getAlias(), subQueryRelNode); + RowResolver sqRR = m_relToHiveRR.get(subQueryRelNode); + + /* + * Check.5.h :: For In and Not In the SubQuery must implicitly or + * explicitly only contain one select item. + */ + if (subQuery.getOperator().getType() != SubQueryType.EXISTS + && subQuery.getOperator().getType() != SubQueryType.NOT_EXISTS + && sqRR.getColumnInfos().size() - subQuery.getNumOfCorrelationExprsAddedToSQSelect() > 1) { + throw new SemanticException(ErrorMsg.INVALID_SUBQUERY_EXPRESSION.getMsg(subQueryAST, + "SubQuery can contain only 1 item in Select List.")); + } + + /* + * If this is a Not In SubQuery Predicate then Join in the Null Check + * SubQuery. See QBSubQuery.NotInCheck for details on why and how this + * is constructed. + */ + if (subQuery.getNotInCheck() != null) { + QBSubQuery.NotInCheck notInCheck = subQuery.getNotInCheck(); + notInCheck.setSQRR(sqRR); + QB qbSQ_nic = new QB(subQuery.getOuterQueryId(), notInCheck.getAlias(), true); + qbSQ_nic.setSubQueryDef(notInCheck.getSubQuery()); + ctx_1 = initPhase1Ctx(); + doPhase1(notInCheck.getSubQueryAST(), qbSQ_nic, ctx_1); + getMetaData(qbSQ_nic); + RelNode subQueryNICRelNode = genLogicalPlan(qbSQ_nic); + aliasToRel.put(notInCheck.getAlias(), subQueryNICRelNode); + srcRel = genJoinRelNode(srcRel, subQueryNICRelNode, + // set explicitly to inner until we figure out SemiJoin use + // notInCheck.getJoinType(), + JoinType.INNER, notInCheck.getJoinConditionAST()); + inputRR = m_relToHiveRR.get(srcRel); + if (forHavingClause) { + aliasToRel.put(havingInputAlias, srcRel); + } + } + + /* + * Gen Join between outer Operator and SQ op + */ + subQuery.buildJoinCondition(inputRR, sqRR, forHavingClause, havingInputAlias); + srcRel = genJoinRelNode(srcRel, subQueryRelNode, subQuery.getJoinType(), + subQuery.getJoinConditionAST()); + searchCond = subQuery.updateOuterQueryFilter(clonedSearchCond); + + srcRel = genFilterRelNode(searchCond, srcRel); + + /* + * For Not Exists and Not In, add a projection on top of the Left + * Outer Join. 
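+       * (The output RowResolver of that projection exposes only the first
+       * numSrcColumns fields, i.e. the outer query's original shape, so the
+       * columns joined in from the SubQuery side do not take part in name
+       * resolution further up the plan.)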
+       */
+        if (subQuery.getOperator().getType() == SubQueryType.NOT_EXISTS
+            || subQuery.getOperator().getType() == SubQueryType.NOT_IN) {
+          srcRel = projectLeftOuterSide(srcRel, numSrcColumns);
+        }
+      }
+      return srcRel;
+    }
+
+    return genFilterRelNode(searchCond, srcRel);
+  }
+
+  private RelNode projectLeftOuterSide(RelNode srcRel, int numColumns) throws SemanticException {
+    RowResolver iRR = m_relToHiveRR.get(srcRel);
+    RowResolver oRR = new RowResolver();
+    RowResolver.add(oRR, iRR, 0, numColumns);
+
+    List<RexNode> optiqColLst = new ArrayList<RexNode>();
+    List<String> oFieldNames = new ArrayList<String>();
+    RelDataType iType = srcRel.getRowType();
+
+    for (int i = 0; i < iType.getFieldCount(); i++) {
+      RelDataTypeField fType = iType.getFieldList().get(i);
+      String fName = iType.getFieldNames().get(i);
+      optiqColLst.add(m_cluster.getRexBuilder().makeInputRef(fType.getType(), i));
+      oFieldNames.add(fName);
+    }
+
+    HiveRel selRel = HiveProjectRel.create(srcRel, optiqColLst, oFieldNames);
+
+    this.m_relToHiveColNameOptiqPosMap.put(selRel, buildHiveToOptiqColumnMap(oRR, selRel));
+    this.m_relToHiveRR.put(selRel, oRR);
+    return selRel;
+  }
+
+  private RelNode genFilterLogicalPlan(QB qb, RelNode srcRel, Map<String, RelNode> aliasToRel,
+      boolean forHavingClause) throws SemanticException {
+    RelNode filterRel = null;
+
+    Iterator<ASTNode> whereClauseIterator = getQBParseInfo(qb).getDestToWhereExpr().values()
+        .iterator();
+    if (whereClauseIterator.hasNext()) {
+      filterRel = genFilterRelNode(qb, (ASTNode) whereClauseIterator.next().getChild(0), srcRel,
+          aliasToRel, forHavingClause);
+    }
+
+    return filterRel;
+  }
+
+  /**
+   * Class to store GenericUDAF related information.
+   */
+  private class AggInfo {
+    private final List<ExprNodeDesc> m_aggParams;
+    private final TypeInfo m_returnType;
+    private final String m_udfName;
+    private final boolean m_distinct;
+
+    private AggInfo(List<ExprNodeDesc> aggParams, TypeInfo returnType, String udfName,
+        boolean isDistinct) {
+      m_aggParams = aggParams;
+      m_returnType = returnType;
+      m_udfName = udfName;
+      m_distinct = isDistinct;
+    }
+  }
+
+  private AggregateCall convertGBAgg(AggInfo agg, RelNode input, List<RexNode> gbChildProjLst,
+      RexNodeConverter converter, HashMap<String, Integer> rexNodeToPosMap,
+      Integer childProjLstIndx) throws SemanticException {
+
+    // 1. Get agg fn ret type in Optiq
+    RelDataType aggFnRetType = TypeConverter.convert(agg.m_returnType,
+        this.m_cluster.getTypeFactory());
+
+    // 2. Convert Agg Fn args and type of args to Optiq
+    // TODO: Does HQL allow expressions as aggregate args, or can they only be
+    // projections from the child?
+    Integer inputIndx;
+    List<Integer> argList = new ArrayList<Integer>();
+    RexNode rexNd = null;
+    RelDataTypeFactory dtFactory = this.m_cluster.getTypeFactory();
+    ImmutableList.Builder<RelDataType> aggArgRelDTBldr = new ImmutableList.Builder<RelDataType>();
+    for (ExprNodeDesc expr : agg.m_aggParams) {
+      rexNd = converter.convert(expr);
+      inputIndx = rexNodeToPosMap.get(rexNd.toString());
+      if (inputIndx == null) {
+        gbChildProjLst.add(rexNd);
+        rexNodeToPosMap.put(rexNd.toString(), childProjLstIndx);
+        inputIndx = childProjLstIndx;
+        childProjLstIndx++;
+      }
+      argList.add(inputIndx);
+
+      // TODO: does arg need type cast?
+      aggArgRelDTBldr.add(TypeConverter.convert(expr.getTypeInfo(), dtFactory));
+    }
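+    // Worked example (illustrative): for avg(c_float) where c_float is not
+    // yet in the child project list, the loop above appends its RexNode to
+    // gbChildProjLst, records the new position, and the resulting call is
+    // roughly new AggregateCall(avgFn, false, [pos], doubleType, null).
+
+    // 3.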
Get Aggregation FN from Optiq given name, ret type and input arg + // type + final Aggregation aggregation = SqlFunctionConverter.getOptiqAggFn(agg.m_udfName, + aggArgRelDTBldr.build(), aggFnRetType); + + return new AggregateCall(aggregation, agg.m_distinct, argList, aggFnRetType, null); + } + + private RelNode genGBRelNode(List gbExprs, List aggInfoLst, + RelNode srcRel) throws SemanticException { + RowResolver gbInputRR = this.m_relToHiveRR.get(srcRel); + ArrayList signature = gbInputRR.getRowSchema().getSignature(); + ImmutableMap posMap = this.m_relToHiveColNameOptiqPosMap.get(srcRel); + RexNodeConverter converter = new RexNodeConverter(this.m_cluster, srcRel.getRowType(), + posMap, 0, false); + + final List gbChildProjLst = Lists.newArrayList(); + final HashMap rexNodeToPosMap = new HashMap(); + final BitSet groupSet = new BitSet(); + Integer gbIndx = 0; + RexNode rnd; + for (ExprNodeDesc key : gbExprs) { + rnd = converter.convert(key); + gbChildProjLst.add(rnd); + groupSet.set(gbIndx); + rexNodeToPosMap.put(rnd.toString(), gbIndx); + gbIndx++; + } + + List aggregateCalls = Lists.newArrayList(); + int i = aggInfoLst.size(); + for (AggInfo agg : aggInfoLst) { + aggregateCalls.add(convertGBAgg(agg, srcRel, gbChildProjLst, converter, rexNodeToPosMap, + gbChildProjLst.size())); + } + + if (gbChildProjLst.isEmpty()) { + // This will happen for count(*), in such cases we arbitarily pick + // first element from srcRel + gbChildProjLst.add(this.m_cluster.getRexBuilder().makeInputRef(srcRel, 0)); + } + RelNode gbInputRel = HiveProjectRel.create(srcRel, gbChildProjLst, null); + + HiveRel aggregateRel = null; + try { + aggregateRel = new HiveAggregateRel(m_cluster, m_cluster.traitSetOf(HiveRel.CONVENTION), + gbInputRel, groupSet, aggregateCalls); + } catch (InvalidRelException e) { + throw new SemanticException(e); + } + + return aggregateRel; + } + + private void addAlternateGByKeyMappings(ASTNode gByExpr, ColumnInfo colInfo, + RowResolver gByInputRR, RowResolver gByRR) { + if (gByExpr.getType() == HiveParser.DOT + && gByExpr.getChild(0).getType() == HiveParser.TOK_TABLE_OR_COL) { + String tab_alias = BaseSemanticAnalyzer.unescapeIdentifier(gByExpr.getChild(0).getChild(0) + .getText()); + String col_alias = BaseSemanticAnalyzer.unescapeIdentifier(gByExpr.getChild(1).getText()); + gByRR.put(tab_alias, col_alias, colInfo); + } else if (gByExpr.getType() == HiveParser.TOK_TABLE_OR_COL) { + String col_alias = BaseSemanticAnalyzer.unescapeIdentifier(gByExpr.getChild(0).getText()); + String tab_alias = null; + /* + * If the input to the GBy has a tab alias for the column, then add an + * entry based on that tab_alias. For e.g. this query: select b.x, + * count(*) from t1 b group by x needs (tab_alias=b, col_alias=x) in the + * GBy RR. tab_alias=b comes from looking at the RowResolver that is the + * ancestor before any GBy/ReduceSinks added for the GBY operation. + */ + try { + ColumnInfo pColInfo = gByInputRR.get(tab_alias, col_alias); + tab_alias = pColInfo == null ? null : pColInfo.getTabAlias(); + } catch (SemanticException se) { + } + gByRR.put(tab_alias, col_alias, colInfo); + } + } + + private void addToGBExpr(RowResolver groupByOutputRowResolver, + RowResolver groupByInputRowResolver, ASTNode grpbyExpr, ExprNodeDesc grpbyExprNDesc, + List gbExprNDescLst, List outputColumnNames) { + // TODO: Should we use grpbyExprNDesc.getTypeInfo()? 
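+    // (Worked example for genGBRelNode above, illustrative: given
+    //   select key, count(value) from t3 group by key
+    // the child project list becomes [key, value], groupSet = {0}, and one
+    // AggregateCall count($1) is attached to the HiveAggregateRel.)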
what if expr is + // UDF + int i = gbExprNDescLst.size(); + String field = getColumnInternalName(i); + outputColumnNames.add(field); + gbExprNDescLst.add(grpbyExprNDesc); + + ColumnInfo oColInfo = new ColumnInfo(field, grpbyExprNDesc.getTypeInfo(), null, false); + groupByOutputRowResolver.putExpression(grpbyExpr, oColInfo); + + // TODO: Alternate mappings, are they necessary? + addAlternateGByKeyMappings(grpbyExpr, oColInfo, groupByInputRowResolver, + groupByOutputRowResolver); + } + + private AggInfo getHiveAggInfo(ASTNode aggAst, int aggFnLstArgIndx, RowResolver inputRR) + throws SemanticException { + AggInfo aInfo = null; + + // 1 Convert UDAF Params to ExprNodeDesc + ArrayList aggParameters = new ArrayList(); + for (int i = 1; i <= aggFnLstArgIndx; i++) { + ASTNode paraExpr = (ASTNode) aggAst.getChild(i); + ExprNodeDesc paraExprNode = genExprNodeDesc(paraExpr, inputRR); + aggParameters.add(paraExprNode); + } + + // 2. Is this distinct UDAF + boolean isDistinct = aggAst.getType() == HiveParser.TOK_FUNCTIONDI; + + // 3. Determine type of UDAF + TypeInfo udafRetType = null; + + // 3.1 Obtain UDAF name + String aggName = unescapeIdentifier(aggAst.getChild(0).getText()); + + // 3.2 Rank functions type is 'int'/'double' + if (FunctionRegistry.isRankingFunction(aggName)) { + if (aggName.equalsIgnoreCase("percent_rank")) + udafRetType = TypeInfoFactory.doubleTypeInfo; + else + udafRetType = TypeInfoFactory.intTypeInfo; + } else { + // 3.3 Try obtaining UDAF evaluators to determine the ret type + try { + boolean isAllColumns = aggAst.getType() == HiveParser.TOK_FUNCTIONSTAR; + + // 3.3.1 Get UDAF Evaluator + Mode amode = groupByDescModeToUDAFMode(GroupByDesc.Mode.COMPLETE, isDistinct); + GenericUDAFEvaluator genericUDAFEvaluator = getGenericUDAFEvaluator(aggName, + aggParameters, aggAst, isDistinct, isAllColumns); + assert (genericUDAFEvaluator != null); + + // 3.3.2 Get UDAF Info using UDAF Evaluator + GenericUDAFInfo udaf = getGenericUDAFInfo(genericUDAFEvaluator, amode, aggParameters); + udafRetType = udaf.returnType; + } catch (Exception e) { + LOG.debug("CBO: Couldn't Obtain UDAF evaluators for " + aggName + + ", trying to translate to GenericUDF"); + } + + // 3.4 Try GenericUDF translation + if (udafRetType == null) { + TypeCheckCtx tcCtx = new TypeCheckCtx(inputRR); + // We allow stateful functions in the SELECT list (but nowhere else) + tcCtx.setAllowStatefulFunctions(true); + tcCtx.setAllowDistinctFunctions(false); + ExprNodeDesc exp = genExprNodeDesc((ASTNode) aggAst.getChild(0), inputRR, tcCtx); + udafRetType = exp.getTypeInfo(); + } + } + + // 4. Construct AggInfo + aInfo = new AggInfo(aggParameters, udafRetType, aggName, isDistinct); + + return aInfo; + } + + /** + * Generate GB plan. + * + * @param qb + * @param srcRel + * @return TODO: 1. Grouping Sets (roll up..) + * @throws SemanticException + */ + private RelNode genGBLogicalPlan(QB qb, RelNode srcRel) throws SemanticException { + RelNode gbRel = null; + QBParseInfo qbp = getQBParseInfo(qb); + + // 1. Gather GB Expressions (AST) (GB + Aggregations) + // NOTE: Multi Insert is not supported + String detsClauseName = qbp.getClauseNames().iterator().next(); + List grpByAstExprs = getGroupByForClause(qbp, detsClauseName); + HashMap aggregationTrees = qbp.getAggregationExprsForClause(detsClauseName); + boolean hasGrpByAstExprs = (grpByAstExprs != null && !grpByAstExprs.isEmpty()) ? true : false; + boolean hasAggregationTrees = (aggregationTrees != null && !aggregationTrees.isEmpty()) ? 
true + : false; + + if (hasGrpByAstExprs || hasAggregationTrees) { + ArrayList gbExprNDescLst = new ArrayList(); + ArrayList outputColumnNames = new ArrayList(); + int numDistinctUDFs = 0; + + // 2. Input, Output Row Resolvers + RowResolver groupByInputRowResolver = this.m_relToHiveRR.get(srcRel); + RowResolver groupByOutputRowResolver = new RowResolver(); + groupByOutputRowResolver.setIsExprResolver(true); + + if (hasGrpByAstExprs) { + // 3. Construct GB Keys (ExprNode) + for (int i = 0; i < grpByAstExprs.size(); ++i) { + ASTNode grpbyExpr = grpByAstExprs.get(i); + Map astToExprNDescMap = TypeCheckProcFactory.genExprNode( + grpbyExpr, new TypeCheckCtx(groupByInputRowResolver)); + ExprNodeDesc grpbyExprNDesc = astToExprNDescMap.get(grpbyExpr); + if (grpbyExprNDesc == null) + throw new RuntimeException("Invalid Column Reference: " + grpbyExpr.dump()); + + addToGBExpr(groupByOutputRowResolver, groupByInputRowResolver, grpbyExpr, + grpbyExprNDesc, gbExprNDescLst, outputColumnNames); + } + } + + // 4. Construct aggregation function Info + ArrayList aggregations = new ArrayList(); + if (hasAggregationTrees) { + assert (aggregationTrees != null); + for (ASTNode value : aggregationTrees.values()) { + // 4.1 Determine type of UDAF + // This is the GenericUDAF name + String aggName = unescapeIdentifier(value.getChild(0).getText()); + boolean isDistinct = value.getType() == HiveParser.TOK_FUNCTIONDI; + boolean isAllColumns = value.getType() == HiveParser.TOK_FUNCTIONSTAR; + if (isDistinct) { + numDistinctUDFs++; + } + + // 4.2 Convert UDAF Params to ExprNodeDesc + ArrayList aggParameters = new ArrayList(); + for (int i = 1; i < value.getChildCount(); i++) { + ASTNode paraExpr = (ASTNode) value.getChild(i); + ExprNodeDesc paraExprNode = genExprNodeDesc(paraExpr, groupByInputRowResolver); + aggParameters.add(paraExprNode); + } + + Mode amode = groupByDescModeToUDAFMode(GroupByDesc.Mode.COMPLETE, isDistinct); + GenericUDAFEvaluator genericUDAFEvaluator = getGenericUDAFEvaluator(aggName, + aggParameters, value, isDistinct, isAllColumns); + assert (genericUDAFEvaluator != null); + GenericUDAFInfo udaf = getGenericUDAFInfo(genericUDAFEvaluator, amode, aggParameters); + AggInfo aInfo = new AggInfo(aggParameters, udaf.returnType, aggName, isDistinct); + aggregations.add(aInfo); + String field = getColumnInternalName(gbExprNDescLst.size() + aggregations.size() - 1); + outputColumnNames.add(field); + groupByOutputRowResolver.putExpression(value, new ColumnInfo(field, aInfo.m_returnType, + "", false)); + } + } + + gbRel = genGBRelNode(gbExprNDescLst, aggregations, srcRel); + m_relToHiveColNameOptiqPosMap.put(gbRel, + buildHiveToOptiqColumnMap(groupByOutputRowResolver, gbRel)); + this.m_relToHiveRR.put(gbRel, groupByOutputRowResolver); + } + + return gbRel; + } + + private RelNode genOBLogicalPlan(QB qb, RelNode srcRel) throws SemanticException { + RelNode relToRet = null; + + QBParseInfo qbp = getQBParseInfo(qb); + String dest = qbp.getClauseNames().iterator().next(); + ASTNode obAST = qbp.getOrderByForClause(dest); + + if (obAST != null) { + // 1. OB Expr sanity test + // in strict mode, in the presence of order by, limit must be specified + Integer limit = qb.getParseInfo().getDestLimit(dest); + if (conf.getVar(HiveConf.ConfVars.HIVEMAPREDMODE).equalsIgnoreCase("strict") + && limit == null) { + throw new SemanticException(generateErrorMessage(obAST, + ErrorMsg.NO_LIMIT_WITH_ORDERBY.getMsg())); + } + + // 2. 
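+      // (Example, illustrative: for "order by c_int + 1 desc", the converted
+      // RexNode is not a plain RexInputRef, so it is added to newVCLst and a
+      // child HiveProjectRel is created further below; the RelFieldCollation
+      // then points at that synthetic column's index.)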
Walk through OB exprs and extract field collations and additional + // virtual columns needed + final List newVCLst = new ArrayList(); + final List fieldCollations = Lists.newArrayList(); + int fieldIndex = 0; + + List obASTExprLst = obAST.getChildren(); + ASTNode obASTExpr; + List> vcASTTypePairs = new ArrayList>(); + RowResolver inputRR = m_relToHiveRR.get(srcRel); + RowResolver outputRR = new RowResolver(); + + RexNode rnd; + RexNodeConverter converter = new RexNodeConverter(m_cluster, srcRel.getRowType(), + m_relToHiveColNameOptiqPosMap.get(srcRel), 0, false); + int srcRelRecordSz = srcRel.getRowType().getFieldCount(); + + for (int i = 0; i < obASTExprLst.size(); i++) { + // 2.1 Convert AST Expr to ExprNode + obASTExpr = (ASTNode) obASTExprLst.get(i); + Map astToExprNDescMap = TypeCheckProcFactory.genExprNode( + obASTExpr, new TypeCheckCtx(inputRR)); + ExprNodeDesc obExprNDesc = astToExprNDescMap.get(obASTExpr.getChild(0)); + if (obExprNDesc == null) + throw new SemanticException("Invalid order by expression: " + obASTExpr.toString()); + + // 2.2 Convert ExprNode to RexNode + rnd = converter.convert(obExprNDesc); + + // 2.3 Determine the index of ob expr in child schema + // NOTE: Optiq can not take compound exprs in OB without it being + // present in the child (& hence we add a child Project Rel) + if (rnd instanceof RexInputRef) { + fieldIndex = ((RexInputRef) rnd).getIndex(); + } else { + fieldIndex = srcRelRecordSz + newVCLst.size(); + newVCLst.add(rnd); + vcASTTypePairs.add(new Pair((ASTNode) obASTExpr.getChild(0), + obExprNDesc.getTypeInfo())); + } + + // 2.4 Determine the Direction of order by + org.eigenbase.rel.RelFieldCollation.Direction order = RelFieldCollation.Direction.DESCENDING; + if (obASTExpr.getType() == HiveParser.TOK_TABSORTCOLNAMEASC) { + order = RelFieldCollation.Direction.ASCENDING; + } + + // 2.5 Add to field collations + fieldCollations.add(new RelFieldCollation(fieldIndex, order)); + } + + // 3. Add Child Project Rel if needed + RelNode obInputRel = srcRel; + if (!newVCLst.isEmpty()) { + List originalInputRefs = Lists.transform(srcRel.getRowType().getFieldList(), + new Function() { + @Override + public RexNode apply(RelDataTypeField input) { + return new RexInputRef(input.getIndex(), input.getType()); + } + }); + + obInputRel = HiveProjectRel.create(srcRel, CompositeList.of(originalInputRefs, newVCLst), + null); + } + + // 4. Construct SortRel + RelTraitSet traitSet = m_cluster.traitSetOf(HiveRel.CONVENTION); + RelCollation canonizedCollation = traitSet.canonize(RelCollationImpl.of(fieldCollations)); + // TODO: Is it better to introduce a + // project on top to restrict VC from showing up in sortRel type + RelNode sortRel = new HiveSortRel(m_cluster, traitSet, obInputRel, canonizedCollation, + null, null); + + // 5. Construct OB Parent Rel If needed + // Construct a parent Project if OB has virtual columns(vc) otherwise + // vc would show up in the result + // TODO: If OB is part of sub query & Parent Query select is not of the + // type "select */.*..." then parent project is not needed + relToRet = sortRel; + if (!newVCLst.isEmpty()) { + List obParentRelProjs = Lists.transform(srcRel.getRowType().getFieldList(), + new Function() { + @Override + public RexNode apply(RelDataTypeField input) { + return new RexInputRef(input.getIndex(), input.getType()); + } + }); + + relToRet = HiveProjectRel.create(sortRel, obParentRelProjs, null); + } + + // 6. Construct output RR + RowResolver.add(outputRR, inputRR, 0); + + // 7. 
Update the maps + // NOTE: Output RR for SortRel is considered same as its input; we may + // end up not using VC that is present in sort rel. Also note that + // rowtype of sortrel is the type of it child; if child happens to be + // synthetic project that we introduced then that projectrel would + // contain the vc. + ImmutableMap hiveColNameOptiqPosMap = buildHiveToOptiqColumnMap(outputRR, + relToRet); + m_relToHiveRR.put(relToRet, outputRR); + m_relToHiveColNameOptiqPosMap.put(relToRet, hiveColNameOptiqPosMap); + } + + return relToRet; + } + + private RelNode genLimitLogicalPlan(QB qb, RelNode srcRel) throws SemanticException { + HiveRel sortRel = null; + QBParseInfo qbp = getQBParseInfo(qb); + Integer limit = qbp.getDestToLimit().get(qbp.getClauseNames().iterator().next()); + + if (limit != null) { + RexNode fetch = m_cluster.getRexBuilder().makeExactLiteral(BigDecimal.valueOf(limit)); + RelTraitSet traitSet = m_cluster.traitSetOf(HiveRel.CONVENTION); + RelCollation canonizedCollation = traitSet.canonize(RelCollationImpl.EMPTY); + sortRel = new HiveSortRel(m_cluster, traitSet, srcRel, canonizedCollation, null, fetch); + + RowResolver outputRR = new RowResolver(); + RowResolver.add(outputRR, m_relToHiveRR.get(srcRel), 0); + ImmutableMap hiveColNameOptiqPosMap = buildHiveToOptiqColumnMap(outputRR, + sortRel); + m_relToHiveRR.put(sortRel, outputRR); + m_relToHiveColNameOptiqPosMap.put(sortRel, hiveColNameOptiqPosMap); + } + + return sortRel; + } + + List getPartitionKeys(PartitionSpec ps, RexNodeConverter converter, RowResolver inputRR) + throws SemanticException { + List pKeys = new ArrayList(); + if (ps != null) { + List pExprs = ps.getExpressions(); + for (PartitionExpression pExpr : pExprs) { + TypeCheckCtx tcCtx = new TypeCheckCtx(inputRR); + tcCtx.setAllowStatefulFunctions(true); + ExprNodeDesc exp = genExprNodeDesc(pExpr.getExpression(), inputRR, tcCtx); + pKeys.add(converter.convert(exp)); + } + } + + return pKeys; + } + + List getOrderKeys(OrderSpec os, RexNodeConverter converter, + RowResolver inputRR) throws SemanticException { + List oKeys = new ArrayList(); + if (os != null) { + List oExprs = os.getExpressions(); + for (OrderExpression oExpr : oExprs) { + TypeCheckCtx tcCtx = new TypeCheckCtx(inputRR); + tcCtx.setAllowStatefulFunctions(true); + ExprNodeDesc exp = genExprNodeDesc(oExpr.getExpression(), inputRR, tcCtx); + RexNode ordExp = converter.convert(exp); + Set flags = new HashSet(); + if (oExpr.getOrder() == org.apache.hadoop.hive.ql.parse.PTFInvocationSpec.Order.DESC) + flags.add(SqlKind.DESCENDING); + oKeys.add(new RexFieldCollation(ordExp, flags)); + } + } + + return oKeys; + } + + RexWindowBound getBound(BoundarySpec bs, RexNodeConverter converter) { + RexWindowBound rwb = null; + + if (bs != null) { + SqlNode sn = null; + SqlParserPos pos = new SqlParserPos(1, 1); + SqlNode amt = bs.getAmt() == 0 ? 
null : SqlLiteral.createExactNumeric( + String.valueOf(bs.getAmt()), new SqlParserPos(2, 2)); + RexNode amtLiteral = null; + SqlCall sc = null; + RexNode rn = null; + + if (amt != null) + amtLiteral = m_cluster.getRexBuilder().makeLiteral(new Integer(bs.getAmt()), + m_cluster.getTypeFactory().createSqlType(SqlTypeName.INTEGER), true); + + switch (bs.getDirection()) { + case PRECEDING: + if (amt == null) { + rwb = RexWindowBound.create(SqlWindow.createUnboundedPreceding(pos), null); + } else { + sc = (SqlCall) SqlWindow.createPreceding(amt, pos); + rwb = RexWindowBound.create(sc, + m_cluster.getRexBuilder().makeCall(sc.getOperator(), amtLiteral)); + } + break; + + case CURRENT: + rwb = RexWindowBound.create(SqlWindow.createCurrentRow(new SqlParserPos(1, 1)), null); + break; + + case FOLLOWING: + if (amt == null) { + rwb = RexWindowBound.create(SqlWindow.createUnboundedFollowing(new SqlParserPos(1, 1)), + null); + } else { + sc = (SqlCall) SqlWindow.createFollowing(amt, pos); + rwb = RexWindowBound.create(sc, + m_cluster.getRexBuilder().makeCall(sc.getOperator(), amtLiteral)); + } + break; + } + } + + return rwb; + } + + int getWindowSpecIndx(ASTNode wndAST) { + int wndASTIndx = -1; + int wi = wndAST.getChildCount() - 1; + if (wi <= 0 || (wndAST.getChild(wi).getType() != HiveParser.TOK_WINDOWSPEC)) { + wi = -1; + } + + return wi; + } + + Pair genWindowingProj(QB qb, WindowExpressionSpec wExpSpec, RelNode srcRel) + throws SemanticException { + RexNode w = null; + TypeInfo wHiveRetType = null; + + if (wExpSpec instanceof WindowFunctionSpec) { + WindowFunctionSpec wFnSpec = (WindowFunctionSpec) wExpSpec; + ASTNode windowProjAst = wFnSpec.getExpression(); + // TODO: do we need to get to child? + int wndSpecASTIndx = getWindowSpecIndx(windowProjAst); + // 2. Get Hive Aggregate Info + AggInfo hiveAggInfo = getHiveAggInfo(windowProjAst, wndSpecASTIndx - 1, + this.m_relToHiveRR.get(srcRel)); + + // 3. Get Optiq Return type for Agg Fn + wHiveRetType = hiveAggInfo.m_returnType; + RelDataType optiqAggFnRetType = TypeConverter.convert(hiveAggInfo.m_returnType, + this.m_cluster.getTypeFactory()); + + // 4. Convert Agg Fn args to Optiq + ImmutableMap posMap = this.m_relToHiveColNameOptiqPosMap.get(srcRel); + RexNodeConverter converter = new RexNodeConverter(this.m_cluster, srcRel.getRowType(), + posMap, 0, false); + Builder optiqAggFnArgsBldr = ImmutableList. builder(); + Builder optiqAggFnArgsTypeBldr = ImmutableList. builder(); + RexNode rexNd = null; + for (int i = 0; i < hiveAggInfo.m_aggParams.size(); i++) { + optiqAggFnArgsBldr.add(converter.convert(hiveAggInfo.m_aggParams.get(i))); + optiqAggFnArgsTypeBldr.add(TypeConverter.convert(hiveAggInfo.m_aggParams.get(i) + .getTypeInfo(), this.m_cluster.getTypeFactory())); + } + ImmutableList optiqAggFnArgs = optiqAggFnArgsBldr.build(); + ImmutableList optiqAggFnArgsType = optiqAggFnArgsTypeBldr.build(); + + // 5. Get Optiq Agg Fn + final SqlAggFunction optiqAggFn = SqlFunctionConverter.getOptiqAggFn(hiveAggInfo.m_udfName, + optiqAggFnArgsType, optiqAggFnRetType); + + // 6. 
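+      // (Worked example, illustrative: for
+      //   sum(c_int) over (partition by key order by c_int
+      //                    rows between 1 preceding and current row)
+      // partitionKeys = [key], orderKeys = [c_int ASC], getBound(...) maps
+      // the frame to PRECEDING(1) / CURRENT ROW, isRows is true, and
+      // makeOver(...) below emits a ROWS-based window aggregate.)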
Translate Window spec + RowResolver inputRR = m_relToHiveRR.get(srcRel); + WindowSpec wndSpec = ((WindowFunctionSpec) wExpSpec).getWindowSpec(); + List partitionKeys = getPartitionKeys(wndSpec.getPartition(), converter, inputRR); + List orderKeys = getOrderKeys(wndSpec.getOrder(), converter, inputRR); + RexWindowBound upperBound = getBound(wndSpec.windowFrame.start, converter); + RexWindowBound lowerBound = getBound(wndSpec.windowFrame.end, converter); + boolean isRows = ((wndSpec.windowFrame.start instanceof RangeBoundarySpec) || (wndSpec.windowFrame.end instanceof RangeBoundarySpec)) ? true + : false; + + w = m_cluster.getRexBuilder().makeOver(optiqAggFnRetType, optiqAggFn, optiqAggFnArgs, + partitionKeys, ImmutableList. copyOf(orderKeys), lowerBound, + upperBound, isRows, true, false); + } else { + // TODO: Convert to Semantic Exception + throw new RuntimeException("Unsupported window Spec"); + } + + return new Pair(w, wHiveRetType); + } + + private RelNode genSelectForWindowing(QB qb, RelNode srcRel) throws SemanticException { + RelNode selOpForWindow = null; + QBParseInfo qbp = getQBParseInfo(qb); + WindowingSpec wSpec = (!qb.getAllWindowingSpecs().isEmpty()) ? qb.getAllWindowingSpecs() + .values().iterator().next() : null; + + if (wSpec != null) { + // 1. Get valid Window Function Spec + wSpec.validateAndMakeEffective(); + List windowExpressions = wSpec.getWindowExpressions(); + + if (windowExpressions != null && !windowExpressions.isEmpty()) { + RowResolver inputRR = this.m_relToHiveRR.get(srcRel); + // 2. Get RexNodes for original Projections from below + List projsForWindowSelOp = new ArrayList( + HiveOptiqUtil.getProjsFromBelowAsInputRef(srcRel)); + + // 3. Construct new Row Resolver with everything from below. + RowResolver out_rwsch = new RowResolver(); + RowResolver.add(out_rwsch, inputRR, 0); + + // 4. Walk through Window Expressions & Construct RexNodes for those, + // Update out_rwsch + for (WindowExpressionSpec wExprSpec : windowExpressions) { + if (out_rwsch.getExpression(wExprSpec.getExpression()) == null) { + Pair wtp = genWindowingProj(qb, wExprSpec, srcRel); + projsForWindowSelOp.add(wtp.getFirst()); + + // 6.2.2 Update Output Row Schema + ColumnInfo oColInfo = new ColumnInfo( + getColumnInternalName(projsForWindowSelOp.size()), wtp.getSecond(), null, false); + String colAlias = wExprSpec.getAlias(); + if (false) { + out_rwsch.checkColumn(null, wExprSpec.getAlias()); + out_rwsch.put(null, wExprSpec.getAlias(), oColInfo); + } else { + out_rwsch.putExpression(wExprSpec.getExpression(), oColInfo); + } + } + } + + selOpForWindow = genSelectRelNode(projsForWindowSelOp, out_rwsch, srcRel); + } + } + + return selOpForWindow; + } + + private RelNode genSelectRelNode(List optiqColLst, RowResolver out_rwsch, + RelNode srcRel) { + // 1. Build Column Names + // TODO: Should this be external names + ArrayList columnNames = new ArrayList(); + for (int i = 0; i < optiqColLst.size(); i++) { + columnNames.add(getColumnInternalName(i)); + } + + // 2. Prepend column names with '_o_' + /* + * Hive treats names that start with '_c' as internalNames; so change the + * names so we don't run into this issue when converting back to Hive AST. + */ + List oFieldNames = Lists.transform(columnNames, new Function() { + @Override + public String apply(String hName) { + return "_o_" + hName; + } + }); + + // 3 Build Optiq Rel Node for project using converted projections & col + // names + HiveRel selRel = HiveProjectRel.create(srcRel, optiqColLst, oFieldNames); + + // 4. 
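+    // (Example, illustrative: internal names _col0, _col1 become Optiq field
+    // names _o__col0, _o__col1, so they can never collide with Hive's
+    // auto-generated internal (_c...) names when unparsing back to an AST.)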
Keep track of colname-to-posmap && RR for new select + this.m_relToHiveColNameOptiqPosMap.put(selRel, buildHiveToOptiqColumnMap(out_rwsch, selRel)); + this.m_relToHiveRR.put(selRel, out_rwsch); + + return selRel; + } + + /** + * NOTE: there can only be one select caluse since we don't handle multi + * destination insert. + * + * @throws SemanticException + */ + private RelNode genSelectLogicalPlan(QB qb, RelNode srcRel) throws SemanticException { + + // 0. Generate a Select Node for Windowing + RelNode selForWindow = genSelectForWindowing(qb, srcRel); + srcRel = (selForWindow == null) ? srcRel : selForWindow; + + boolean subQuery; + ArrayList col_list = new ArrayList(); + ArrayList> windowingRexNodes = new ArrayList>(); + + // 1. Get Select Expression List + QBParseInfo qbp = getQBParseInfo(qb); + String selClauseName = qbp.getClauseNames().iterator().next(); + ASTNode selExprList = qbp.getSelForClause(selClauseName); + + // 2.Row resolvers for input, output + RowResolver out_rwsch = new RowResolver(); + ASTNode trfm = null; + Integer pos = Integer.valueOf(0); + RowResolver inputRR = this.m_relToHiveRR.get(srcRel); + + // 3. Query Hints + // TODO: Handle Query Hints; currently we ignore them + boolean selectStar = false; + int posn = 0; + boolean hintPresent = (selExprList.getChild(0).getType() == HiveParser.TOK_HINTLIST); + if (hintPresent) { + posn++; + } + + // 4. Determine if select corresponds to a subquery + subQuery = qb.getParseInfo().getIsSubQ(); + + // 4. Bailout if select involves Transform + boolean isInTransform = (selExprList.getChild(posn).getChild(0).getType() == HiveParser.TOK_TRANSFORM); + if (isInTransform) { + throw new RuntimeException("SELECT TRANSFORM not supported"); + } + + // 5. Bailout if select involves UDTF + ASTNode udtfExpr = (ASTNode) selExprList.getChild(posn).getChild(0); + GenericUDTF genericUDTF = null; + int udtfExprType = udtfExpr.getType(); + if (udtfExprType == HiveParser.TOK_FUNCTION || udtfExprType == HiveParser.TOK_FUNCTIONSTAR) { + String funcName = TypeCheckProcFactory.DefaultExprProcessor.getFunctionText(udtfExpr, true); + FunctionInfo fi = FunctionRegistry.getFunctionInfo(funcName); + if (fi != null) { + genericUDTF = fi.getGenericUDTF(); + } + if (genericUDTF != null) { + throw new RuntimeException("SELECT UDTF not supported"); + } + } + + // 6. Iterate over all expression (after SELECT) + ASTNode exprList = selExprList; + int startPosn = posn; + int wndProjPos = 0; + List tabAliasesForAllProjs = getTabAliases(inputRR); + for (int i = startPosn; i < exprList.getChildCount(); ++i) { + + // 6.1 child can be EXPR AS ALIAS, or EXPR. + ASTNode child = (ASTNode) exprList.getChild(i); + boolean hasAsClause = (!isInTransform) && (child.getChildCount() == 2); + + // 6.2 EXPR AS (ALIAS,...) parses, but is only allowed for UDTF's + // This check is not needed and invalid when there is a transform b/c + // the + // AST's are slightly different. + if (child.getChildCount() > 2) { + throw new SemanticException(generateErrorMessage((ASTNode) child.getChild(2), + ErrorMsg.INVALID_AS.getMsg())); + } + + ASTNode expr; + String tabAlias; + String colAlias; + + // 6.3 Get rid of TOK_SELEXPR + expr = (ASTNode) child.getChild(0); + String[] colRef = getColAlias(child, autogenColAliasPrfxLbl, inputRR, + autogenColAliasPrfxIncludeFuncName, i); + tabAlias = colRef[0]; + colAlias = colRef[1]; + + // 6.4 Build ExprNode corresponding to colums + if (expr.getType() == HiveParser.TOK_ALLCOLREF) { + pos = genColListRegex(".*", + expr.getChildCount() == 0 ? 
null : getUnescapedName((ASTNode) expr.getChild(0)) + .toLowerCase(), expr, col_list, inputRR, pos, out_rwsch, tabAliasesForAllProjs, + subQuery); + selectStar = true; + } else if (expr.getType() == HiveParser.TOK_TABLE_OR_COL && !hasAsClause + && !inputRR.getIsExprResolver() + && isRegex(unescapeIdentifier(expr.getChild(0).getText()), conf)) { + // In case the expression is a regex COL. + // This can only happen without AS clause + // We don't allow this for ExprResolver - the Group By case + pos = genColListRegex(unescapeIdentifier(expr.getChild(0).getText()), null, expr, + col_list, inputRR, pos, out_rwsch, tabAliasesForAllProjs, subQuery); + } else if (expr.getType() == HiveParser.DOT + && expr.getChild(0).getType() == HiveParser.TOK_TABLE_OR_COL + && inputRR.hasTableAlias(unescapeIdentifier(expr.getChild(0).getChild(0).getText() + .toLowerCase())) && !hasAsClause && !inputRR.getIsExprResolver() + && isRegex(unescapeIdentifier(expr.getChild(1).getText()), conf)) { + // In case the expression is TABLE.COL (col can be regex). + // This can only happen without AS clause + // We don't allow this for ExprResolver - the Group By case + pos = genColListRegex(unescapeIdentifier(expr.getChild(1).getText()), + unescapeIdentifier(expr.getChild(0).getChild(0).getText().toLowerCase()), expr, + col_list, inputRR, pos, out_rwsch, tabAliasesForAllProjs, subQuery); + } else { + // Case when this is an expression + TypeCheckCtx tcCtx = new TypeCheckCtx(inputRR); + // We allow stateful functions in the SELECT list (but nowhere else) + tcCtx.setAllowStatefulFunctions(true); + ExprNodeDesc exp = genExprNodeDesc(expr, inputRR, tcCtx); + String recommended = recommendName(exp, colAlias); + if (recommended != null && out_rwsch.get(null, recommended) == null) { + colAlias = recommended; + } + col_list.add(exp); + if (subQuery) { + out_rwsch.checkColumn(tabAlias, colAlias); + } + + ColumnInfo colInfo = new ColumnInfo(getColumnInternalName(pos), + exp.getWritableObjectInspector(), tabAlias, false); + colInfo.setSkewedCol((exp instanceof ExprNodeColumnDesc) ? ((ExprNodeColumnDesc) exp) + .isSkewedCol() : false); + out_rwsch.put(tabAlias, colAlias, colInfo); + + if (exp instanceof ExprNodeColumnDesc) { + ExprNodeColumnDesc colExp = (ExprNodeColumnDesc) exp; + String[] altMapping = inputRR.getAlternateMappings(colExp.getColumn()); + if (altMapping != null) { + out_rwsch.put(altMapping[0], altMapping[1], colInfo); + } + } + + pos = Integer.valueOf(pos.intValue() + 1); + } + } + selectStar = selectStar && exprList.getChildCount() == posn + 1; + + // 7. Convert Hive projections to Optiq + List optiqColLst = new ArrayList(); + RexNodeConverter rexNodeConv = new RexNodeConverter(m_cluster, srcRel.getRowType(), + buildHiveColNameToInputPosMap(col_list, inputRR), 0, false); + for (ExprNodeDesc colExpr : col_list) { + optiqColLst.add(rexNodeConv.convert(colExpr)); + } + + // 8. 
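+    // (Example, illustrative: for "select key, c_int + 1 from t1" with key at
+    // input position 0 and c_int at position 2, col_list holds an
+    // ExprNodeColumnDesc and an ExprNodeGenericFuncDesc, and the converted
+    // Optiq projections are [$0, +($2, 1)].)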
Build Optiq Rel + RelNode selRel = genSelectRelNode(optiqColLst, out_rwsch, srcRel); + + return selRel; + } + + private RelNode genLogicalPlan(QBExpr qbexpr) throws SemanticException { + if (qbexpr.getOpcode() == QBExpr.Opcode.NULLOP) { + return genLogicalPlan(qbexpr.getQB()); + } + if (qbexpr.getOpcode() == QBExpr.Opcode.UNION) { + RelNode qbexpr1Ops = genLogicalPlan(qbexpr.getQBExpr1()); + RelNode qbexpr2Ops = genLogicalPlan(qbexpr.getQBExpr2()); + + return genUnionLogicalPlan(qbexpr.getAlias(), qbexpr.getQBExpr1().getAlias(), qbexpr1Ops, + qbexpr.getQBExpr2().getAlias(), qbexpr2Ops); + } + return null; + } + + private RelNode genLogicalPlan(QB qb) throws SemanticException { + RelNode srcRel = null; + RelNode filterRel = null; + RelNode gbRel = null; + RelNode gbHavingRel = null; + RelNode havingRel = null; + RelNode selectRel = null; + RelNode obRel = null; + RelNode limitRel = null; + + RelNode rootRel = null; + // First generate all the opInfos for the elements in the from clause + Map aliasToRel = new HashMap(); + + // 1. Build Rel For Src (SubQuery, TS, Join) + // 1.1. Recurse over the subqueries to fill the subquery part of the plan + for (String subqAlias : qb.getSubqAliases()) { + QBExpr qbexpr = qb.getSubqForAlias(subqAlias); + aliasToRel.put(subqAlias, genLogicalPlan(qbexpr)); + qbexpr.setAlias(subqAlias); + } + + // 1.2 Recurse over all the source tables + for (String tableAlias : qb.getTabAliases()) { + RelNode op = genTableLogicalPlan(tableAlias, qb); + aliasToRel.put(tableAlias, op); + } + + // 1.3 process join + if (qb.getParseInfo().getJoinExpr() != null) { + srcRel = genJoinLogicalPlan(qb.getParseInfo().getJoinExpr(), aliasToRel); + } else { + // If no join then there should only be either 1 TS or 1 SubQuery + srcRel = aliasToRel.values().iterator().next(); + } + + // 2. Build Rel for where Clause + filterRel = genFilterLogicalPlan(qb, srcRel, aliasToRel, false); + srcRel = (filterRel == null) ? srcRel : filterRel; + + // 3. Build Rel for GB Clause + gbRel = genGBLogicalPlan(qb, srcRel); + srcRel = (gbRel == null) ? srcRel : gbRel; + + // 4. Build Rel for GB Having Clause + gbHavingRel = genGBHavingLogicalPlan(qb, srcRel, aliasToRel); + srcRel = (gbHavingRel == null) ? srcRel : gbHavingRel; + + // 5. Build Rel for Select Clause + selectRel = genSelectLogicalPlan(qb, srcRel); + srcRel = (selectRel == null) ? srcRel : selectRel; + + // 6. Incase this QB corresponds to subquery then modify its RR to point + // to subquery alias + // TODO: cleanup this + if (qb.getParseInfo().getAlias() != null) { + RowResolver rr = this.m_relToHiveRR.get(srcRel); + RowResolver newRR = new RowResolver(); + String alias = qb.getParseInfo().getAlias(); + for (ColumnInfo colInfo : rr.getColumnInfos()) { + String name = colInfo.getInternalName(); + String[] tmp = rr.reverseLookup(name); + if ("".equals(tmp[0]) || tmp[1] == null) { + // ast expression is not a valid column name for table + tmp[1] = colInfo.getInternalName(); + } + ColumnInfo newCi = new ColumnInfo(colInfo); + newCi.setTabAlias(alias); + newRR.put(alias, tmp[1], newCi); + } + m_relToHiveRR.put(srcRel, newRR); + m_relToHiveColNameOptiqPosMap.put(srcRel, buildHiveToOptiqColumnMap(newRR, srcRel)); + } + + // 7. Build Rel for OB Clause + obRel = genOBLogicalPlan(qb, srcRel); + srcRel = (obRel == null) ? srcRel : obRel; + + // 8. Build Rel for Limit Clause + limitRel = genLimitLogicalPlan(qb, srcRel); + srcRel = (limitRel == null) ? 
srcRel : limitRel; + + if (LOG.isDebugEnabled()) { + LOG.debug("Created Plan for Query Block " + qb.getId()); + } + + return srcRel; + } + + private RelNode genGBHavingLogicalPlan(QB qb, RelNode srcRel, Map aliasToRel) + throws SemanticException { + RelNode gbFilter = null; + QBParseInfo qbp = getQBParseInfo(qb); + ASTNode havingClause = qbp.getHavingForClause(qbp.getClauseNames().iterator().next()); + + if (havingClause != null) + gbFilter = genFilterRelNode(qb, (ASTNode) havingClause.getChild(0), srcRel, aliasToRel, + true); + + return gbFilter; + } + + private ImmutableMap buildHiveToOptiqColumnMap(RowResolver rr, RelNode rNode) { + ImmutableMap.Builder b = new ImmutableMap.Builder(); + int i = 0; + for (ColumnInfo ci : rr.getRowSchema().getSignature()) { + b.put(ci.getInternalName(), rr.getPosition(ci.getInternalName())); + } + return b.build(); + } + + private ImmutableMap buildHiveColNameToInputPosMap( + List col_list, RowResolver inputRR) { + // Build a map of Hive column Names (ExprNodeColumnDesc Name) + // to the positions of those projections in the input + Map hashCodeTocolumnDescMap = new HashMap(); + ExprNodeDescUtils.getExprNodeColumnDesc(col_list, hashCodeTocolumnDescMap); + ImmutableMap.Builder hiveColNameToInputPosMapBuilder = new ImmutableMap.Builder(); + String exprNodecolName; + for (ExprNodeDesc exprDesc : hashCodeTocolumnDescMap.values()) { + exprNodecolName = ((ExprNodeColumnDesc) exprDesc).getColumn(); + hiveColNameToInputPosMapBuilder.put(exprNodecolName, inputRR.getPosition(exprNodecolName)); + } + + return hiveColNameToInputPosMapBuilder.build(); + } + + private QBParseInfo getQBParseInfo(QB qb) { + QBParseInfo qbp = qb.getParseInfo(); + if (qbp.getClauseNames().size() > 1) + throw new RuntimeException("Multi Insert is not supported"); + return qbp; + } + + private List getTabAliases(RowResolver inputRR) { + List tabAliases = new ArrayList(); + for (ColumnInfo ci : inputRR.getColumnInfos()) { + tabAliases.add(ci.getTabAlias()); + } + + return tabAliases; + } + } + } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/UnparseTranslator.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/UnparseTranslator.java index c69f747..9ad6714 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/UnparseTranslator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/UnparseTranslator.java @@ -262,4 +262,10 @@ public String toString() { ASTNode targetNode; ASTNode sourceNode; } + + public void clear() { + translations.clear(); + copyTranslations.clear(); + enabled = false; + } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeConstantDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeConstantDesc.java index 3295aba..8a41577 100755 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeConstantDesc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeConstantDesc.java @@ -25,6 +25,7 @@ import org.apache.hadoop.hive.serde2.objectinspector.ConstantObjectInspector; import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector.PrimitiveCategory; import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory; +import org.apache.hadoop.hive.serde2.typeinfo.BaseCharTypeInfo; import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo; import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory; @@ -93,7 +94,7 @@ public String getExprString() { return "null"; } - if (typeInfo.getTypeName().equals(serdeConstants.STRING_TYPE_NAME)) 
{ + if (typeInfo.getTypeName().equals(serdeConstants.STRING_TYPE_NAME) || typeInfo instanceof BaseCharTypeInfo) { return "'" + value.toString() + "'"; } else if (typeInfo.getTypeName().equals(serdeConstants.BINARY_TYPE_NAME)) { byte[] bytes = (byte[]) value; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeDescUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeDescUtils.java index f293c43..75372cd 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeDescUtils.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeDescUtils.java @@ -372,5 +372,42 @@ private static ExprNodeConstantDesc foldConstant(ExprNodeGenericFuncDesc func) { } catch (Exception e) { return null; } - } + } + + public static void getExprNodeColumnDesc(List exprDescList, + Map hashCodeTocolumnDescMap) { + for (ExprNodeDesc exprNodeDesc : exprDescList) { + getExprNodeColumnDesc(exprNodeDesc, hashCodeTocolumnDescMap); + } + } + + /** + * Get Map of ExprNodeColumnDesc HashCode to ExprNodeColumnDesc. + * + * @param exprDesc + * @param hashCodeTocolumnDescMap + * Assumption: If two ExprNodeColumnDesc have same hash code then + * they are logically referring to same projection + */ + public static void getExprNodeColumnDesc(ExprNodeDesc exprDesc, + Map hashCodeTocolumnDescMap) { + if (exprDesc instanceof ExprNodeColumnDesc) { + hashCodeTocolumnDescMap.put( + ((ExprNodeColumnDesc) exprDesc).hashCode(), + ((ExprNodeColumnDesc) exprDesc)); + } else if (exprDesc instanceof ExprNodeColumnListDesc) { + for (ExprNodeDesc child : ((ExprNodeColumnListDesc) exprDesc) + .getChildren()) { + getExprNodeColumnDesc(child, hashCodeTocolumnDescMap); + } + } else if (exprDesc instanceof ExprNodeGenericFuncDesc) { + for (ExprNodeDesc child : ((ExprNodeGenericFuncDesc) exprDesc) + .getChildren()) { + getExprNodeColumnDesc(child, hashCodeTocolumnDescMap); + } + } else if (exprDesc instanceof ExprNodeFieldDesc) { + getExprNodeColumnDesc(((ExprNodeFieldDesc) exprDesc).getDesc(), + hashCodeTocolumnDescMap); + } + } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/MapJoinDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/MapJoinDesc.java index 57ab9de..536e788 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/MapJoinDesc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/MapJoinDesc.java @@ -122,6 +122,7 @@ private void initRetainExprList() { } } + @Explain(displayName = "input vertices") public Map getParentToInput() { return parentToInput; } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java index a375e28..c46f15c 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java @@ -91,6 +91,7 @@ private static final Log LOG = LogFactory.getLog(StatsUtils.class.getName()); + /** * Collect table, partition and column level statistics * @param conf @@ -107,15 +108,30 @@ public static Statistics collectStatistics(HiveConf conf, PrunedPartitionList partList, Table table, TableScanOperator tableScanOperator) throws HiveException { - Statistics stats = new Statistics(); - // column level statistics are required only for the columns that are needed List schema = tableScanOperator.getSchema().getSignature(); List neededColumns = tableScanOperator.getNeededColumns(); + + return collectStatistics(conf, partList, table, schema, neededColumns); + } + + private static Statistics collectStatistics(HiveConf conf, PrunedPartitionList partList, + 
Table table, List schema, List neededColumns) throws HiveException { + boolean fetchColStats = HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_STATS_FETCH_COLUMN_STATS); boolean fetchPartStats = HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_STATS_FETCH_PARTITION_STATS); + + return collectStatistics(conf, partList, table, schema, neededColumns, fetchColStats, fetchPartStats); + } + + public static Statistics collectStatistics(HiveConf conf, PrunedPartitionList partList, + Table table, List schema, List neededColumns, + boolean fetchColStats, boolean fetchPartStats) throws HiveException { + + Statistics stats = new Statistics(); + float deserFactor = HiveConf.getFloatVar(conf, HiveConf.ConfVars.HIVE_STATS_DESERIALIZATION_FACTOR); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFFromUtcTimestamp.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFFromUtcTimestamp.java index a0aeccf..f76fc10 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFFromUtcTimestamp.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFFromUtcTimestamp.java @@ -22,6 +22,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.hive.ql.exec.UDFArgumentException; import org.apache.hadoop.hive.ql.exec.UDFArgumentLengthException; import org.apache.hadoop.hive.ql.metadata.HiveException; @@ -33,7 +34,9 @@ import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory; import org.apache.hadoop.io.Text; - +@Description(name = "from_utc_timestamp", + value = "from_utc_timestamp(timestamp, string timezone) - " + + "Assumes given timestamp ist UTC and converts to given timezone (as of Hive 0.8.0)") public class GenericUDFFromUtcTimestamp extends GenericUDF { static final Log LOG = LogFactory.getLog(GenericUDFFromUtcTimestamp.class); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFTimestamp.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFTimestamp.java index ba4fed7..2f854f4 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFTimestamp.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFTimestamp.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.hive.ql.udf.generic; +import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.hive.ql.exec.UDFArgumentException; import org.apache.hadoop.hive.ql.exec.UDFArgumentLengthException; import org.apache.hadoop.hive.ql.exec.vector.VectorizedExpressions; @@ -39,6 +40,8 @@ * Creates a TimestampWritable object using PrimitiveObjectInspectorConverter * */ +@Description(name = "timestamp", +value = "cast(date as timestamp) - Returns timestamp") @VectorizedExpressions({CastLongToTimestampViaLongToLong.class, CastDoubleToTimestampViaDoubleToLong.class, CastDecimalToTimestamp.class}) public class GenericUDFTimestamp extends GenericUDF { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFToUtcTimestamp.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFToUtcTimestamp.java index af4da3a..4234346 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFToUtcTimestamp.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFToUtcTimestamp.java @@ -17,7 +17,11 @@ */ package org.apache.hadoop.hive.ql.udf.generic; +import org.apache.hadoop.hive.ql.exec.Description; +@Description(name = "to_utc_timestamp", + value = 
"to_utc_timestamp(timestamp, string timezone) - " + + "Assumes given timestamp is in given timezone and converts to UTC (as of Hive 0.8.0)") public class GenericUDFToUtcTimestamp extends GenericUDFFromUtcTimestamp { diff --git a/ql/src/test/queries/clientpositive/cbo_correctness.q b/ql/src/test/queries/clientpositive/cbo_correctness.q new file mode 100644 index 0000000..ddba40a --- /dev/null +++ b/ql/src/test/queries/clientpositive/cbo_correctness.q @@ -0,0 +1,460 @@ +set hive.cbo.enable=true; +set hive.exec.check.crossproducts=false; + +drop table if exists t1; +drop table if exists t2; +drop table if exists t3; + +create table t1(key string, value string, c_int int, c_float float, c_boolean boolean) partitioned by (dt string) row format delimited fields terminated by ',' STORED AS TEXTFILE; +create table t2(key string, value string, c_int int, c_float float, c_boolean boolean) partitioned by (dt string) row format delimited fields terminated by ',' STORED AS TEXTFILE; +create table t3(key string, value string, c_int int, c_float float, c_boolean boolean) row format delimited fields terminated by ',' STORED AS TEXTFILE; + +load data local inpath '../../data/files/cbo_t1.txt' into table t1 partition (dt='2014'); +load data local inpath '../../data/files/cbo_t2.txt' into table t2 partition (dt='2014'); +load data local inpath '../../data/files/cbo_t3.txt' into table t3; + +CREATE TABLE part( + p_partkey INT, + p_name STRING, + p_mfgr STRING, + p_brand STRING, + p_type STRING, + p_size INT, + p_container STRING, + p_retailprice DOUBLE, + p_comment STRING +); + +LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part; + +DROP TABLE lineitem; +CREATE TABLE lineitem (L_ORDERKEY INT, + L_PARTKEY INT, + L_SUPPKEY INT, + L_LINENUMBER INT, + L_QUANTITY DOUBLE, + L_EXTENDEDPRICE DOUBLE, + L_DISCOUNT DOUBLE, + L_TAX DOUBLE, + L_RETURNFLAG STRING, + L_LINESTATUS STRING, + l_shipdate STRING, + L_COMMITDATE STRING, + L_RECEIPTDATE STRING, + L_SHIPINSTRUCT STRING, + L_SHIPMODE STRING, + L_COMMENT STRING) +ROW FORMAT DELIMITED +FIELDS TERMINATED BY '|'; + +LOAD DATA LOCAL INPATH '../../data/files/lineitem.txt' OVERWRITE INTO TABLE lineitem; + +create table src_cbo as select * from src; + + +set hive.stats.dbclass=jdbc:derby; +analyze table t1 partition (dt) compute statistics; +analyze table t1 compute statistics for columns key, value, c_int, c_float, c_boolean; +analyze table t2 partition (dt) compute statistics; +analyze table t2 compute statistics for columns key, value, c_int, c_float, c_boolean; +analyze table t3 compute statistics; +analyze table t3 compute statistics for columns key, value, c_int, c_float, c_boolean; +analyze table src_cbo compute statistics; +analyze table src_cbo compute statistics for columns; +analyze table part compute statistics; +analyze table part compute statistics for columns; +analyze table lineitem compute statistics; +analyze table lineitem compute statistics for columns; + +set hive.stats.fetch.column.stats=true; +set hive.auto.convert.join=false; + +-- 1. Test Select + TS +select * from t1; +select * from t1 as t1; +select * from t1 as t2; + +select t1.key as x, c_int as c_int, (((c_int+c_float)*10)+5) as y from t1; + +-- 2. 
Test Select + TS + FIL +select * from t1 where t1.c_int >= 0; +select * from t1 as t1 where t1.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100; +select * from t1 as t2 where t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100; + +select t2.key as x, c_int as c_int, (((c_int+c_float)*10)+5) as y from t1 as t2 where t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100; + +-- 3 Test Select + Select + TS + FIL +select * from (select * from t1 where t1.c_int >= 0) as t1; +select * from (select * from t1 as t1 where t1.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as t1; +select * from (select * from t1 as t2 where t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as t1; +select * from (select t2.key as x, c_int as c_int, (((c_int+c_float)*10)+5) as y from t1 as t2 where t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as t1; + +select * from (select * from t1 where t1.c_int >= 0) as t1 where t1.c_int >= 0; +select * from (select * from t1 as t1 where t1.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as t1 where t1.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100; +select * from (select * from t1 as t2 where t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as t2 where t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100; +select * from (select t2.key as x, c_int as c_int, (((c_int+c_float)*10)+5) as y from t1 as t2 where t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as t1 where t1.c_int >= 0 and y+c_int >= 0 or x <= 100; + +select t1.c_int+c_float as x , c_int as c_int, (((c_int+c_float)*10)+5) as y from (select * from t1 where t1.c_int >= 0) as t1 where t1.c_int >= 0; +select t2.c_int+c_float as x , c_int as c_int, (((c_int+c_float)*10)+5) as y from (select * from t1 where t1.c_int >= 0) as t2 where t2.c_int >= 0; + +-- 4. 
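+-- (Illustrative aside: to eyeball the CBO-produced plan for any query in
+-- this suite, prefix it with EXPLAIN, e.g.
+--   explain select * from t1 where t1.c_int >= 0;
+-- EXPLAIN output is deliberately not asserted by this correctness suite.)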
Test Select + Join + TS +select t1.c_int, t2.c_int from t1 join t2 on t1.key=t2.key; +select t1.key from t1 join t3; +select t1.key from t1 join t3 where t1.key=t3.key and t1.key >= 1; +select t1.c_int, t2.c_int from t1 left outer join t2 on t1.key=t2.key; +select t1.c_int, t2.c_int from t1 right outer join t2 on t1.key=t2.key; +select t1.c_int, t2.c_int from t1 full outer join t2 on t1.key=t2.key; + +select b, t1.c, t2.p, q, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1) t1 join (select t2.key as p, t2.c_int as q, c_float as r from t2) t2 on t1.a=p join t3 on t1.a=key; +select key, t1.c_int, t2.p, q from t1 join (select t2.key as p, t2.c_int as q, c_float as r from t2) t2 on t1.key=p join (select key as a, c_int as b, t3.c_float as c from t3)t3 on t1.key=a; +select a, t1.b, key, t2.c_int, t3.p from (select key as a, c_int as b, t1.c_float as c from t1) t1 join t2 on t1.a=key join (select key as p, c_int as q, t3.c_float as r from t3)t3 on t1.a=t3.p; +select b, t1.c, t2.c_int, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1) t1 join t2 on t1.a=t2.key join t3 on t1.a=t3.key; +select t3.c_int, b, t2.c_int, t1.c from (select key as a, c_int as b, t1.c_float as c from t1) t1 join t2 on t1.a=t2.key join t3 on t1.a=t3.key; + +select b, t1.c, t2.p, q, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1) t1 left outer join (select t2.key as p, t2.c_int as q, c_float as r from t2) t2 on t1.a=p join t3 on t1.a=key; +select key, t1.c_int, t2.p, q from t1 join (select t2.key as p, t2.c_int as q, c_float as r from t2) t2 on t1.key=p left outer join (select key as a, c_int as b, t3.c_float as c from t3)t3 on t1.key=a; + +select b, t1.c, t2.p, q, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1) t1 right outer join (select t2.key as p, t2.c_int as q, c_float as r from t2) t2 on t1.a=p join t3 on t1.a=key; +select key, t1.c_int, t2.p, q from t1 join (select t2.key as p, t2.c_int as q, c_float as r from t2) t2 on t1.key=p right outer join (select key as a, c_int as b, t3.c_float as c from t3)t3 on t1.key=a; + +select b, t1.c, t2.p, q, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1) t1 full outer join (select t2.key as p, t2.c_int as q, c_float as r from t2) t2 on t1.a=p join t3 on t1.a=key; +select key, t1.c_int, t2.p, q from t1 join (select t2.key as p, t2.c_int as q, c_float as r from t2) t2 on t1.key=p full outer join (select key as a, c_int as b, t3.c_float as c from t3)t3 on t1.key=a; + +-- 5. 
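+-- (Illustrative toggle: any join query above can be replayed with the
+-- optimizer off to compare against the non-CBO plan.)
+set hive.cbo.enable=false;
+explain select t1.c_int, t2.c_int from t1 join t2 on t1.key=t2.key;
+set hive.cbo.enable=true;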
Test Select + Join + FIL + TS +select t1.c_int, t2.c_int from t1 join t2 on t1.key=t2.key where (t1.c_int + t2.c_int == 2) and (t1.c_int > 0 or t2.c_float >= 0); +select t1.c_int, t2.c_int from t1 left outer join t2 on t1.key=t2.key where (t1.c_int + t2.c_int == 2) and (t1.c_int > 0 or t2.c_float >= 0); +select t1.c_int, t2.c_int from t1 right outer join t2 on t1.key=t2.key where (t1.c_int + t2.c_int == 2) and (t1.c_int > 0 or t2.c_float >= 0); +select t1.c_int, t2.c_int from t1 full outer join t2 on t1.key=t2.key where (t1.c_int + t2.c_int == 2) and (t1.c_int > 0 or t2.c_float >= 0); + +select b, t1.c, t2.p, q, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or t2.q >= 0); + +select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 left outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0); + + + +select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 right outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0); + +select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 full outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0); + +select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 full outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0); + + +select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 left outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p left outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0); + +select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 left outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p right outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0); + +select * from (select q, b, t2.p, t1.c, t3.c_int from (select 
key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 left outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p full outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0); + + + +select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 right outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p right outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0); + +select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 right outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p left outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0); + +select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 right outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p full outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0); + + + +select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 full outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p full outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0); + +select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 full outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p left outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0); + +select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 full outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p right outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0); + + +-- 6. 
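(editor's aside: a hedged sketch of the group-by/having shape exercised next, not part of the committed test)
+-- HAVING filters groups after aggregation, so its predicates are built from grouping keys and aggregates:
+--   select key, sum(c_int) as s from t1 group by key having sum(c_int) >= 0;
+-- 6.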
Test Select + TS + Join + Fil + GB + GB Having +select * from t1 group by c_int; +select key, (c_int+1)+2 as x, sum(c_int) from t1 group by c_float, t1.c_int, key; +select * from (select key, (c_int+c_float+1+2) as x, sum(c_int) as y from t1 group by c_float, t1.c_int, key) R group by y, x; + +select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key order by a) t1 join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key order by q/10 desc, r asc) t2 on t1.a=p join t3 on t1.a=key where (b + t2.q >= 0) and (b > 0 or c_int >= 0) group by t3.c_int, c order by t3.c_int+c desc, c; + +select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key having t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by b % c asc, b desc) t1 left outer join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key having t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0) t2 on t1.a=p left outer join t3 on t1.a=key where (b + t2.q >= 0) and (b > 0 or c_int >= 0) group by t3.c_int, c having t3.c_int > 0 and (c_int >=1 or c >= 1) and (c_int + c) >= 0 order by t3.c_int % c asc, t3.c_int desc; + +select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key having t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by b+c, a desc) t1 right outer join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key having t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0) t2 on t1.a=p right outer join t3 on t1.a=key where (b + t2.q >= 2) and (b > 0 or c_int >= 0) group by t3.c_int, c; + +select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key having t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by c+a desc) t1 full outer join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key having t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by p+q desc, r asc) t2 on t1.a=p full outer join t3 on t1.a=key where (b + t2.q >= 0) and (b > 0 or c_int >= 0) group by t3.c_int, c having t3.c_int > 0 and (c_int >=1 or c >= 1) and (c_int + c) >= 0 order by t3.c_int; + +select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key having t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0) t1 join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key having t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0) t2 on t1.a=p join t3 on t1.a=key where (b + t2.q >= 0) and (b > 0 or c_int >= 0) group by t3.c_int, c; + +-- 7. 
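(editor's aside: a hedged sketch of the nested order-by/limit pattern exercised next)
+--   select key, c_int from (select key, c_int from t1 order by c_int limit 10) s order by key limit 5;
+-- The inner LIMIT bounds the rows the outer sort sees, which is what these plans must preserve.
+-- 7.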
Test Select + TS + Join + Fil + GB + GB Having + Limit +select * from t1 group by c_int limit 1; +select key, (c_int+1)+2 as x, sum(c_int) from t1 group by c_float, t1.c_int, key order by x limit 1; +select * from (select key, (c_int+c_float+1+2) as x, sum(c_int) as y from t1 group by c_float, t1.c_int, key) R group by y, x order by x,y limit 1; +select key from(select key from (select key from t1 limit 5)t2 limit 5)t3 limit 5; +select key, c_int from(select key, c_int from (select key, c_int from t1 order by c_int limit 5)t1 order by c_int limit 5)t2 order by c_int limit 5; + +select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key order by a limit 5) t1 join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key order by q/10 desc, r asc limit 5) t2 on t1.a=p join t3 on t1.a=key where (b + t2.q >= 0) and (b > 0 or c_int >= 0) group by t3.c_int, c order by t3.c_int+c desc, c limit 5; + +select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key having t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by b % c asc, b desc limit 5) t1 left outer join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key having t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 limit 5) t2 on t1.a=p left outer join t3 on t1.a=key where (b + t2.q >= 0) and (b > 0 or c_int >= 0) group by t3.c_int, c having t3.c_int > 0 and (c_int >=1 or c >= 1) and (c_int + c) >= 0 order by t3.c_int % c asc, t3.c_int desc limit 5; + +-- 8. Test UDF/UDAF +select count(*), count(c_int), sum(c_int), avg(c_int), max(c_int), min(c_int) from t1; +select count(*), count(c_int), sum(c_int), avg(c_int), max(c_int), min(c_int), case c_int when 0 then 1 when 1 then 2 else 3 end, sum(case c_int when 0 then 1 when 1 then 2 else 3 end) from t1 group by c_int; +select * from (select count(*) as a, count(distinct c_int) as b, sum(c_int) as c, avg(c_int) as d, max(c_int) as e, min(c_int) as f from t1) t1; +select * from (select count(*) as a, count(distinct c_int) as b, sum(c_int) as c, avg(c_int) as d, max(c_int) as e, min(c_int) as f, case c_int when 0 then 1 when 1 then 2 else 3 end as g, sum(case c_int when 0 then 1 when 1 then 2 else 3 end) as h from t1 group by c_int) t1; +select f,a,e,b from (select count(*) as a, count(c_int) as b, sum(c_int) as c, avg(c_int) as d, max(c_int) as e, min(c_int) as f from t1) t1; +select f,a,e,b from (select count(*) as a, count(distinct c_int) as b, sum(distinct c_int) as c, avg(distinct c_int) as d, max(distinct c_int) as e, min(distinct c_int) as f from t1) t1; +select count(c_int) as a, avg(c_float), key from t1 group by key; +select count(distinct c_int) as a, avg(c_float) from t1 group by c_float; +select count(distinct c_int) as a, avg(c_float) from t1 group by c_int; +select count(distinct c_int) as a, avg(c_float) from t1 group by c_float, c_int; + +-- 9. 
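(editor's aside: a hedged sketch of an aggregate computed over a window frame, the pattern stressed below with both ROWS and RANGE frames)
+--   select key, sum(c_int) over (partition by key order by value rows between 1 preceding and current row) from t1;
+-- 9.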
Test Windowing Functions +select count(c_int) over() from t1; +select count(c_int) over(), sum(c_float) over(), max(c_int) over(), min(c_int) over(), row_number() over(), rank() over(), dense_rank() over(), percent_rank() over(), lead(c_int, 2, c_int) over(), lag(c_float, 2, c_float) over() from t1; +select * from (select count(c_int) over(), sum(c_float) over(), max(c_int) over(), min(c_int) over(), row_number() over(), rank() over(), dense_rank() over(), percent_rank() over(), lead(c_int, 2, c_int) over(), lag(c_float, 2, c_float) over() from t1) t1; +select x from (select count(c_int) over() as x, sum(c_float) over() from t1) t1; +select 1+sum(c_int) over() from t1; +select sum(c_int)+sum(sum(c_int)) over() from t1; +select * from (select max(c_int) over (partition by key order by value Rows UNBOUNDED PRECEDING), min(c_int) over (partition by key order by value rows current row), count(c_int) over(partition by key order by value ROWS 1 PRECEDING), avg(value) over (partition by key order by value Rows between unbounded preceding and unbounded following), sum(value) over (partition by key order by value rows between unbounded preceding and current row), avg(c_float) over (partition by key order by value Rows between 1 preceding and unbounded following), sum(c_float) over (partition by key order by value rows between 1 preceding and current row), max(c_float) over (partition by key order by value rows between 1 preceding and unbounded following), min(c_float) over (partition by key order by value rows between 1 preceding and 1 following) from t1) t1; +select i, a, h, b, c, d, e, f, g, a as x, a +1 as y from (select max(c_int) over (partition by key order by value range UNBOUNDED PRECEDING) a, min(c_int) over (partition by key order by value range current row) b, count(c_int) over(partition by key order by value range 1 PRECEDING) c, avg(value) over (partition by key order by value range between unbounded preceding and unbounded following) d, sum(value) over (partition by key order by value range between unbounded preceding and current row) e, avg(c_float) over (partition by key order by value range between 1 preceding and unbounded following) f, sum(c_float) over (partition by key order by value range between 1 preceding and current row) g, max(c_float) over (partition by key order by value range between 1 preceding and unbounded following) h, min(c_float) over (partition by key order by value range between 1 preceding and 1 following) i from t1) t1; + +-- 10. 
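(editor's aside: a hedged sketch of the view/CTE composition checked next; dt = '2014' is the partition loaded for this suite)
+--   with q as (select key, c_int from t1 where dt = '2014') select count(*) from q;
+-- 10.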
Test views +create view v1 as select c_int, value, c_boolean, dt from t1; +create view v2 as select c_int, value from t2; + +select value from v1 where c_boolean=false; +select max(c_int) from v1 group by (c_boolean); + +select count(v1.c_int) from v1 join t2 on v1.c_int = t2.c_int; +select count(v1.c_int) from v1 join v2 on v1.c_int = v2.c_int; + +select count(*) from v1 a join v1 b on a.value = b.value; + +create view v3 as select v1.value val from v1 join t1 on v1.c_boolean = t1.c_boolean; + +select count(val) from v3 where val != '1'; +with q1 as ( select key from t1 where key = '1') +select count(*) from q1; + +with q1 as ( select value from v1 where c_boolean = false) +select count(value) from q1 ; + +create view v4 as +with q1 as ( select key,c_int from t1 where key = '1') +select * from q1 +; + +with q1 as ( select c_int from q2 where c_boolean = false), +q2 as ( select c_int,c_boolean from v1 where value = '1') +select sum(c_int) from (select c_int from q1) a; + +with q1 as ( select t1.c_int c_int from q2 join t1 where q2.c_int = t1.c_int and t1.dt='2014'), +q2 as ( select c_int,c_boolean from v1 where value = '1' or dt = '14') +select count(*) from q1 join q2 join v4 on q1.c_int = q2.c_int and v4.c_int = q2.c_int; + + +drop view v1; +drop view v2; +drop view v3; +drop view v4; + +-- 11. Union All +select * from t1 union all select * from t2 order by key; +select key from (select key, c_int from (select * from t1 union all select * from t2 where t2.key >=0)r1 union all select key, c_int from t3)r2 where key >=0 order by key; +select r2.key from (select key, c_int from (select key, c_int from t1 union all select key, c_int from t3 )r1 union all select key, c_int from t3)r2 join (select key, c_int from (select * from t1 union all select * from t2 where t2.key >=0)r1 union all select key, c_int from t3)r3 on r2.key=r3.key where r3.key >=0 order by r2.key; + +-- 12. 
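(editor's aside: a hedged note on LEFT SEMI JOIN, kept as comments so the recorded output is unaffected)
+-- LEFT SEMI JOIN emits each left row that has at least one match on the right; only left-side
+-- columns may appear in the select list, e.g.:
+--   select t1.key from t1 left semi join t2 on t1.key = t2.key;
+-- 12.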
SemiJoin +select t1.c_int from t1 left semi join t2 on t1.key=t2.key; +select t1.c_int from t1 left semi join t2 on t1.key=t2.key where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0); +select * from (select c, b, a from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 left semi join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p left semi join t3 on t1.a=key where (b + 1 == 2) and (b > 0 or c >= 0)) R where (b + 1 = 2) and (R.b > 0 or c >= 0); +select * from (select t3.c_int, t1.c, b from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 = 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 left semi join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p left outer join t3 on t1.a=key where (b + t3.c_int == 2) and (b > 0 or c_int >= 0)) R where (R.c_int + 1 = 2) and (R.b > 0 or c_int >= 0); +select * from (select c_int, b, t1.c from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 left semi join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p right outer join t3 on t1.a=key where (b + 1 == 2) and (b > 0 or c_int >= 0)) R where (c + 1 = 2) and (R.b > 0 or c_int >= 0); +select * from (select c_int, b, t1.c from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 left semi join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p full outer join t3 on t1.a=key where (b + 1 == 2) and (b > 0 or c_int >= 0)) R where (c + 1 = 2) and (R.b > 0 or c_int >= 0); +select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key having t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by a+b desc, c asc) t1 left semi join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key having t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by q+r/10 desc, p) t2 on t1.a=p left semi join t3 on t1.a=key where (b + 1 >= 0) and (b > 0 or a >= 0) group by a, c having a > 0 and (a >=1 or c >= 1) and (a + c) >= 0 order by c, a; +select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key having t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by a+b desc, c asc limit 5) t1 left semi join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key having t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by q+r/10 desc, p limit 5) t2 on t1.a=p left semi join t3 on t1.a=key where (b + 1 >= 0) and (b > 0 or a >= 0) group by a, c having a > 0 and (a >=1 or c >= 1) and (a + c) >= 0 order by c, a; + +-- 13. null expr in select list +select null from t3; + +-- 14. unary operator +select key from t1 where c_int = -6 or c_int = +6; + +-- 15. 
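(editor's aside: a hedged note; with statistics gathered, a query touching only partition columns may be answerable from partition metadata rather than a full scan)
+--   select count(t1.dt) from t1 where t1.dt = '2014';
+-- 15.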
query referencing only partition columns +select count(t1.dt) from t1 join t2 on t1.dt = t2.dt where t1.dt = '2014' ; + +-- 16. SubQueries Not In +-- non agg, non corr +select * +from src_cbo +where src_cbo.key not in + ( select key from src_cbo s1 + where s1.key > '2' + ) +; + +-- non agg, corr +select p_mfgr, b.p_name, p_size +from part b +where b.p_name not in + (select p_name + from (select p_mfgr, p_name, p_size as r from part) a + where r < 10 and b.p_mfgr = a.p_mfgr + ) +; + +-- agg, non corr +select p_name, p_size +from +part where part.p_size not in + (select avg(p_size) + from (select p_size from part) a + where p_size < 10 + ) order by p_name +; + +-- agg, corr +select p_mfgr, p_name, p_size +from part b where b.p_size not in + (select min(p_size) + from (select p_mfgr, p_size from part) a + where p_size < 10 and b.p_mfgr = a.p_mfgr + ) order by p_name +; + +-- non agg, non corr, Group By in Parent Query +select li.l_partkey, count(*) +from lineitem li +where li.l_linenumber = 1 and + li.l_orderkey not in (select l_orderkey from lineitem where l_shipmode = 'AIR') +group by li.l_partkey +; + +-- add null check test from sq_notin.q once HIVE-7721 resolved. + +-- non agg, corr, having +select b.p_mfgr, min(p_retailprice) +from part b +group by b.p_mfgr +having b.p_mfgr not in + (select p_mfgr + from (select p_mfgr, min(p_retailprice) l, max(p_retailprice) r, avg(p_retailprice) a from part group by p_mfgr) a + where min(p_retailprice) = l and r - l > 600 + ) +; + +-- agg, non corr, having +select b.p_mfgr, min(p_retailprice) +from part b +group by b.p_mfgr +having b.p_mfgr not in + (select p_mfgr + from part a + group by p_mfgr + having max(p_retailprice) - min(p_retailprice) > 600 + ) +; + +-- 17. SubQueries In +-- non agg, non corr +select * +from src_cbo +where src_cbo.key in (select key from src_cbo s1 where s1.key > '9') +; + +-- agg, corr +-- add back once rank issue fixed for cbo + +-- distinct, corr +select * +from src_cbo b +where b.key in + (select distinct a.key + from src_cbo a + where b.value = a.value and a.key > '9' + ) +; + +-- non agg, corr, with join in Parent Query +select p.p_partkey, li.l_suppkey +from (select distinct l_partkey as p_partkey from lineitem) p join lineitem li on p.p_partkey = li.l_partkey +where li.l_linenumber = 1 and + li.l_orderkey in (select l_orderkey from lineitem where l_shipmode = 'AIR' and l_linenumber = li.l_linenumber) +; + +-- where and having +-- Plan is: +-- Stage 1: b semijoin sq1:src_cbo (subquery in where) +-- Stage 2: group by Stage 1 o/p +-- Stage 5: group by on sq2:src_cbo (subquery in having) +-- Stage 6: Stage 2 o/p semijoin Stage 5 +select key, value, count(*) +from src_cbo b +where b.key in (select key from src_cbo where src_cbo.key > '8') +group by key, value +having count(*) in (select count(*) from src_cbo s1 where s1.key > '9' group by s1.key ) +; + +-- non agg, non corr, windowing +select p_mfgr, p_name, avg(p_size) +from part +group by p_mfgr, p_name +having p_name in + (select first_value(p_name) over(partition by p_mfgr order by p_size) from part) +; + +-- 18. SubQueries Not Exists +-- distinct, corr +select * +from src_cbo b +where not exists + (select distinct a.key + from src_cbo a + where b.value = a.value and a.value > 'val_2' + ) +; + +-- no agg, corr, having +select * +from src_cbo b +group by key, value +having not exists + (select a.key + from src_cbo a + where b.value = a.value and a.key = b.key and a.value > 'val_12' + ) +; + +-- 19. 
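(editor's aside: the EXISTS tests mirror the correlated NOT EXISTS pattern above; a hedged sketch)
+--   select * from src_cbo b where exists (select a.key from src_cbo a where a.key = b.key and a.value > 'val_9');
+-- 19.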
SubQueries Exists +-- view test +create view cv1 as +select * +from src_cbo b +where exists + (select a.key + from src_cbo a + where b.value = a.value and a.key = b.key and a.value > 'val_9') +; + +select * from cv1 +; + +-- sq in from +select * +from (select * + from src_cbo b + where exists + (select a.key + from src_cbo a + where b.value = a.value and a.key = b.key and a.value > 'val_9') + ) a +; + +-- sq in from, having +select * +from (select b.key, count(*) + from src_cbo b + group by b.key + having exists + (select a.key + from src_cbo a + where a.key = b.key and a.value > 'val_9' + ) +) a +; + +-- 17. get stats with empty partition list +select t1.value from t1 join t2 on t1.key = t2.key where t1.dt = '10' and t1.c_boolean = true; + + diff --git a/ql/src/test/results/clientpositive/cbo_correctness.q.out b/ql/src/test/results/clientpositive/cbo_correctness.q.out new file mode 100644 index 0000000..1c302e9 --- /dev/null +++ b/ql/src/test/results/clientpositive/cbo_correctness.q.out @@ -0,0 +1,18958 @@ +PREHOOK: query: drop table if exists t1 +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table if exists t1 +POSTHOOK: type: DROPTABLE +PREHOOK: query: drop table if exists t2 +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table if exists t2 +POSTHOOK: type: DROPTABLE +PREHOOK: query: drop table if exists t3 +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table if exists t3 +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table t1(key string, value string, c_int int, c_float float, c_boolean boolean) partitioned by (dt string) row format delimited fields terminated by ',' STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@t1 +POSTHOOK: query: create table t1(key string, value string, c_int int, c_float float, c_boolean boolean) partitioned by (dt string) row format delimited fields terminated by ',' STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@t1 +PREHOOK: query: create table t2(key string, value string, c_int int, c_float float, c_boolean boolean) partitioned by (dt string) row format delimited fields terminated by ',' STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@t2 +POSTHOOK: query: create table t2(key string, value string, c_int int, c_float float, c_boolean boolean) partitioned by (dt string) row format delimited fields terminated by ',' STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@t2 +PREHOOK: query: create table t3(key string, value string, c_int int, c_float float, c_boolean boolean) row format delimited fields terminated by ',' STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@t3 +POSTHOOK: query: create table t3(key string, value string, c_int int, c_float float, c_boolean boolean) row format delimited fields terminated by ',' STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@t3 +PREHOOK: query: load data local inpath '../../data/files/cbo_t1.txt' into table t1 partition (dt='2014') +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@t1 +POSTHOOK: query: load data local inpath '../../data/files/cbo_t1.txt' into table t1 partition (dt='2014') +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@t1 +POSTHOOK: Output: default@t1@dt=2014 +PREHOOK: 
query: load data local inpath '../../data/files/cbo_t2.txt' into table t2 partition (dt='2014') +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@t2 +POSTHOOK: query: load data local inpath '../../data/files/cbo_t2.txt' into table t2 partition (dt='2014') +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@t2 +POSTHOOK: Output: default@t2@dt=2014 +PREHOOK: query: load data local inpath '../../data/files/cbo_t3.txt' into table t3 +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@t3 +POSTHOOK: query: load data local inpath '../../data/files/cbo_t3.txt' into table t3 +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@t3 +PREHOOK: query: CREATE TABLE part( + p_partkey INT, + p_name STRING, + p_mfgr STRING, + p_brand STRING, + p_type STRING, + p_size INT, + p_container STRING, + p_retailprice DOUBLE, + p_comment STRING +) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@part +POSTHOOK: query: CREATE TABLE part( + p_partkey INT, + p_name STRING, + p_mfgr STRING, + p_brand STRING, + p_type STRING, + p_size INT, + p_container STRING, + p_retailprice DOUBLE, + p_comment STRING +) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@part +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@part +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@part +PREHOOK: query: DROP TABLE lineitem +PREHOOK: type: DROPTABLE +POSTHOOK: query: DROP TABLE lineitem +POSTHOOK: type: DROPTABLE +PREHOOK: query: CREATE TABLE lineitem (L_ORDERKEY INT, + L_PARTKEY INT, + L_SUPPKEY INT, + L_LINENUMBER INT, + L_QUANTITY DOUBLE, + L_EXTENDEDPRICE DOUBLE, + L_DISCOUNT DOUBLE, + L_TAX DOUBLE, + L_RETURNFLAG STRING, + L_LINESTATUS STRING, + l_shipdate STRING, + L_COMMITDATE STRING, + L_RECEIPTDATE STRING, + L_SHIPINSTRUCT STRING, + L_SHIPMODE STRING, + L_COMMENT STRING) +ROW FORMAT DELIMITED +FIELDS TERMINATED BY '|' +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@lineitem +POSTHOOK: query: CREATE TABLE lineitem (L_ORDERKEY INT, + L_PARTKEY INT, + L_SUPPKEY INT, + L_LINENUMBER INT, + L_QUANTITY DOUBLE, + L_EXTENDEDPRICE DOUBLE, + L_DISCOUNT DOUBLE, + L_TAX DOUBLE, + L_RETURNFLAG STRING, + L_LINESTATUS STRING, + l_shipdate STRING, + L_COMMITDATE STRING, + L_RECEIPTDATE STRING, + L_SHIPINSTRUCT STRING, + L_SHIPMODE STRING, + L_COMMENT STRING) +ROW FORMAT DELIMITED +FIELDS TERMINATED BY '|' +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@lineitem +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/lineitem.txt' OVERWRITE INTO TABLE lineitem +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@lineitem +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/lineitem.txt' OVERWRITE INTO TABLE lineitem +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@lineitem +PREHOOK: query: create table src_cbo as select * from src +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@src_cbo +POSTHOOK: query: create table src_cbo as select * 
from src +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: default@src +POSTHOOK: Output: database:default +POSTHOOK: Output: default@src_cbo +PREHOOK: query: analyze table t1 partition (dt) compute statistics +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Output: default@t1 +PREHOOK: Output: default@t1@dt=2014 +POSTHOOK: query: analyze table t1 partition (dt) compute statistics +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Output: default@t1 +POSTHOOK: Output: default@t1@dt=2014 +PREHOOK: query: analyze table t1 compute statistics for columns key, value, c_int, c_float, c_boolean +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: analyze table t1 compute statistics for columns key, value, c_int, c_float, c_boolean +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +PREHOOK: query: analyze table t2 partition (dt) compute statistics +PREHOOK: type: QUERY +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t2@dt=2014 +PREHOOK: Output: default@t2 +PREHOOK: Output: default@t2@dt=2014 +POSTHOOK: query: analyze table t2 partition (dt) compute statistics +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t2@dt=2014 +POSTHOOK: Output: default@t2 +POSTHOOK: Output: default@t2@dt=2014 +PREHOOK: query: analyze table t2 compute statistics for columns key, value, c_int, c_float, c_boolean +PREHOOK: type: QUERY +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t2@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: analyze table t2 compute statistics for columns key, value, c_int, c_float, c_boolean +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t2@dt=2014 +#### A masked pattern was here #### +PREHOOK: query: analyze table t3 compute statistics +PREHOOK: type: QUERY +PREHOOK: Input: default@t3 +PREHOOK: Output: default@t3 +POSTHOOK: query: analyze table t3 compute statistics +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t3 +POSTHOOK: Output: default@t3 +PREHOOK: query: analyze table t3 compute statistics for columns key, value, c_int, c_float, c_boolean +PREHOOK: type: QUERY +PREHOOK: Input: default@t3 +#### A masked pattern was here #### +POSTHOOK: query: analyze table t3 compute statistics for columns key, value, c_int, c_float, c_boolean +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t3 +#### A masked pattern was here #### +PREHOOK: query: analyze table src_cbo compute statistics +PREHOOK: type: QUERY +PREHOOK: Input: default@src_cbo +PREHOOK: Output: default@src_cbo +POSTHOOK: query: analyze table src_cbo compute statistics +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src_cbo +POSTHOOK: Output: default@src_cbo +PREHOOK: query: analyze table src_cbo compute statistics for columns +PREHOOK: type: QUERY +PREHOOK: Input: default@src_cbo +#### A masked pattern was here #### +POSTHOOK: query: analyze table src_cbo compute statistics for columns +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src_cbo +#### A masked pattern was here #### +PREHOOK: query: analyze table part compute statistics +PREHOOK: type: QUERY +PREHOOK: Input: default@part +PREHOOK: Output: default@part +POSTHOOK: query: analyze table part compute statistics +POSTHOOK: type: QUERY +POSTHOOK: Input: default@part +POSTHOOK: Output: default@part +PREHOOK: query: analyze table part compute 
statistics for columns +PREHOOK: type: QUERY +PREHOOK: Input: default@part +#### A masked pattern was here #### +POSTHOOK: query: analyze table part compute statistics for columns +POSTHOOK: type: QUERY +POSTHOOK: Input: default@part +#### A masked pattern was here #### +PREHOOK: query: analyze table lineitem compute statistics +PREHOOK: type: QUERY +PREHOOK: Input: default@lineitem +PREHOOK: Output: default@lineitem +POSTHOOK: query: analyze table lineitem compute statistics +POSTHOOK: type: QUERY +POSTHOOK: Input: default@lineitem +POSTHOOK: Output: default@lineitem +PREHOOK: query: analyze table lineitem compute statistics for columns +PREHOOK: type: QUERY +PREHOOK: Input: default@lineitem +#### A masked pattern was here #### +POSTHOOK: query: analyze table lineitem compute statistics for columns +POSTHOOK: type: QUERY +POSTHOOK: Input: default@lineitem +#### A masked pattern was here #### +PREHOOK: query: -- 1. Test Select + TS +select * from t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: -- 1. Test Select + TS +select * from t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 false 2014 +1 1 1 1.0 false 2014 +null null NULL NULL NULL 2014 +null null NULL NULL NULL 2014 +PREHOOK: query: select * from t1 as t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select * from t1 as t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 false 2014 +1 1 1 1.0 false 2014 +null null NULL NULL NULL 2014 +null null NULL NULL NULL 2014 +PREHOOK: query: select * from t1 as t2 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select * from t1 as t2 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 false 2014 +1 1 1 1.0 false 2014 +null null NULL NULL NULL 2014 +null null NULL NULL NULL 2014 +PREHOOK: query: select t1.key as x, c_int as c_int, (((c_int+c_float)*10)+5) as y from t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select t1.key as x, c_int as c_int, 
(((c_int+c_float)*10)+5) as y from t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 + 1 1 25.0 + 1 1 25.0 + 1 1 25.0 + 1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +null NULL NULL +null NULL NULL +PREHOOK: query: -- 2. Test Select + TS + FIL +select * from t1 where t1.c_int >= 0 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: -- 2. Test Select + TS + FIL +select * from t1 where t1.c_int >= 0 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 false 2014 +1 1 1 1.0 false 2014 +PREHOOK: query: select * from t1 as t1 where t1.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select * from t1 as t1 where t1.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 false 2014 +1 1 1 1.0 false 2014 +PREHOOK: query: select * from t1 as t2 where t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select * from t1 as t2 where t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 false 2014 +1 1 1 1.0 false 2014 +PREHOOK: query: select t2.key as x, c_int as c_int, (((c_int+c_float)*10)+5) as y from t1 as t2 where t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select t2.key as x, c_int as c_int, (((c_int+c_float)*10)+5) as y from t1 as t2 where t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 + 1 1 25.0 + 1 1 25.0 + 1 1 25.0 + 
1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +PREHOOK: query: -- 3 Test Select + Select + TS + FIL +select * from (select * from t1 where t1.c_int >= 0) as t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: -- 3 Test Select + Select + TS + FIL +select * from (select * from t1 where t1.c_int >= 0) as t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 false 2014 +1 1 1 1.0 false 2014 +PREHOOK: query: select * from (select * from t1 as t1 where t1.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select * from t1 as t1 where t1.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 false 2014 +1 1 1 1.0 false 2014 +PREHOOK: query: select * from (select * from t1 as t2 where t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select * from t1 as t2 where t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 false 2014 +1 1 1 1.0 false 2014 +PREHOOK: query: select * from (select t2.key as x, c_int as c_int, (((c_int+c_float)*10)+5) as y from t1 as t2 where t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select t2.key as x, c_int as c_int, (((c_int+c_float)*10)+5) as y from t1 as t2 where t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 + 1 1 25.0 + 1 1 25.0 + 1 1 25.0 + 1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +PREHOOK: query: select * from (select * from t1 where t1.c_int >= 0) as t1 where 
t1.c_int >= 0 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select * from t1 where t1.c_int >= 0) as t1 where t1.c_int >= 0 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 false 2014 +1 1 1 1.0 false 2014 +PREHOOK: query: select * from (select * from t1 as t1 where t1.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as t1 where t1.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select * from t1 as t1 where t1.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as t1 where t1.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 false 2014 +1 1 1 1.0 false 2014 +PREHOOK: query: select * from (select * from t1 as t2 where t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as t2 where t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select * from t1 as t2 where t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as t2 where t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 false 2014 +1 1 1 1.0 false 2014 +PREHOOK: query: select * from (select t2.key as x, c_int as c_int, (((c_int+c_float)*10)+5) as y from t1 as t2 where t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as t1 where t1.c_int >= 0 and y+c_int >= 0 or x <= 100 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select t2.key as x, c_int as c_int, (((c_int+c_float)*10)+5) as y from t1 as t2 where t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as t1 where t1.c_int >= 0 and y+c_int >= 0 or x <= 100 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 
25.0 + 1 1 25.0 + 1 1 25.0 + 1 1 25.0 + 1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +PREHOOK: query: select t1.c_int+c_float as x , c_int as c_int, (((c_int+c_float)*10)+5) as y from (select * from t1 where t1.c_int >= 0) as t1 where t1.c_int >= 0 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select t1.c_int+c_float as x , c_int as c_int, (((c_int+c_float)*10)+5) as y from (select * from t1 where t1.c_int >= 0) as t1 where t1.c_int >= 0 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +PREHOOK: query: select t2.c_int+c_float as x , c_int as c_int, (((c_int+c_float)*10)+5) as y from (select * from t1 where t1.c_int >= 0) as t2 where t2.c_int >= 0 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select t2.c_int+c_float as x , c_int as c_int, (((c_int+c_float)*10)+5) as y from (select * from t1 where t1.c_int >= 0) as t2 where t2.c_int >= 0 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +PREHOOK: query: -- 4. Test Select + Join + TS +select t1.c_int, t2.c_int from t1 join t2 on t1.key=t2.key +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t2@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: -- 4. 
Test Select + Join + TS +select t1.c_int, t2.c_int from t1 join t2 on t1.key=t2.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t2@dt=2014 +#### A masked pattern was here #### +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +NULL NULL +NULL NULL +NULL NULL +NULL NULL +PREHOOK: query: select t1.key from t1 join t3 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@t3 +#### A masked pattern was here #### +POSTHOOK: query: select t1.key from t1 join t3 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@t3 +#### A masked pattern was here #### +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +PREHOOK: query: select t1.key from t1 join t3 where t1.key=t3.key and t1.key >= 1 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@t3 +#### A masked pattern was here #### +POSTHOOK: query: select t1.key from t1 join t3 where t1.key=t3.key and t1.key >= 1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@t3 +#### A masked pattern was here #### + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +PREHOOK: query: select t1.c_int, t2.c_int from t1 left outer join t2 on t1.key=t2.key +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@t2 +PREHOOK: Input: 
default@t2@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select t1.c_int, t2.c_int from t1 left outer join t2 on t1.key=t2.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t2@dt=2014 +#### A masked pattern was here #### +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +NULL NULL +NULL NULL +NULL NULL +NULL NULL +PREHOOK: query: select t1.c_int, t2.c_int from t1 right outer join t2 on t1.key=t2.key +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t2@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select t1.c_int, t2.c_int from t1 right outer join t2 on t1.key=t2.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t2@dt=2014 +#### A masked pattern was here #### +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +NULL 2 +NULL 2 +NULL 2 +NULL 2 +NULL 2 +NULL NULL +NULL NULL +NULL NULL +NULL NULL +PREHOOK: query: select t1.c_int, t2.c_int from t1 full outer join t2 on t1.key=t2.key +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t2@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select t1.c_int, t2.c_int from t1 full outer join t2 on t1.key=t2.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t2@dt=2014 +#### A masked pattern was here #### +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +NULL 2 +NULL 2 +NULL 2 +NULL 2 +NULL 2 +NULL NULL +NULL NULL +NULL NULL +NULL NULL +PREHOOK: query: select b, t1.c, t2.p, q, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1) t1 join (select t2.key as p, t2.c_int as q, c_float as r from t2) t2 on t1.a=p join t3 on t1.a=key +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t2@dt=2014 +PREHOOK: Input: default@t3 +#### A masked pattern was here #### +POSTHOOK: query: select b, 
t1.c, t2.p, q, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1) t1 join (select t2.key as p, t2.c_int as q, c_float as r from t2) t2 on t1.a=p join t3 on t1.a=key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t2@dt=2014 +POSTHOOK: Input: default@t3 +#### A masked pattern was here #### +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 
1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 
1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +NULL NULL null NULL NULL +NULL NULL null NULL NULL +NULL NULL null NULL NULL +NULL NULL null NULL NULL +NULL NULL null NULL NULL +NULL NULL null NULL NULL +NULL NULL null NULL NULL +NULL NULL null NULL NULL +PREHOOK: query: select key, t1.c_int, t2.p, q from t1 join (select t2.key as p, t2.c_int as q, c_float as r from t2) t2 on t1.key=p join (select key as a, c_int as b, t3.c_float as c from t3)t3 on t1.key=a +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t2@dt=2014 +PREHOOK: Input: default@t3 +#### A masked pattern was here #### +POSTHOOK: query: select key, t1.c_int, t2.p, q from t1 join (select t2.key as p, t2.c_int as q, c_float as r from t2) t2 on t1.key=p join (select key as a, c_int as b, t3.c_float as c from t3)t3 on t1.key=a +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t2@dt=2014 +POSTHOOK: Input: default@t3 +#### A masked pattern was here #### + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 
1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +null NULL null NULL +null NULL null NULL +null NULL null NULL +null NULL null NULL +null NULL null NULL +null NULL null NULL +null NULL null NULL +null NULL null NULL +PREHOOK: query: select a, t1.b, key, t2.c_int, t3.p from (select key as a, c_int as b, t1.c_float as c from t1) t1 join t2 on t1.a=key join (select key as p, c_int as q, t3.c_float as r from t3)t3 on t1.a=t3.p +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t2@dt=2014 +PREHOOK: Input: default@t3 +#### A masked pattern was here #### +POSTHOOK: query: select a, t1.b, key, t2.c_int, t3.p from (select key as a, c_int as b, t1.c_float as c from t1) t1 join t2 on t1.a=key join (select key as p, c_int as q, t3.c_float as r from t3)t3 on t1.a=t3.p +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t2@dt=2014 +POSTHOOK: Input: default@t3 +#### A masked pattern was here #### + 1 1 1 1 1 + 1 1 1 1 1 + 1 1 1 1 1 + 1 1 1 1 1 + 1 1 1 1 1 + 1 1 1 1 1 + 1 1 1 1 1 + 1 1 1 1 1 
+ 1 1 1 1 1 + 1 1 1 1 1 + 1 1 1 1 1 + 1 1 1 1 1 + 1 1 1 1 1 + 1 1 1 1 1 + 1 1 1 1 1 + 1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 
1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +null NULL null NULL null +null NULL null NULL null +null NULL null NULL null +null NULL null NULL null +null NULL null NULL null +null NULL null NULL null +null NULL null NULL null +null NULL null NULL null +PREHOOK: query: select b, t1.c, t2.c_int, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1) t1 join t2 on t1.a=t2.key join t3 on t1.a=t3.key +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t2@dt=2014 +PREHOOK: Input: default@t3 +#### A masked pattern was here #### +POSTHOOK: query: select b, t1.c, t2.c_int, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1) t1 join t2 on t1.a=t2.key join t3 on t1.a=t3.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t2@dt=2014 +POSTHOOK: Input: default@t3 +#### A masked pattern was here #### +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 
1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 
1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +NULL NULL NULL NULL +NULL NULL NULL NULL +NULL NULL NULL NULL +NULL NULL NULL NULL +NULL NULL NULL NULL +NULL NULL NULL NULL +NULL NULL NULL NULL +NULL NULL NULL NULL +PREHOOK: query: select t3.c_int, b, t2.c_int, t1.c from (select key as a, c_int as b, t1.c_float as c from t1) t1 join t2 on t1.a=t2.key join t3 on t1.a=t3.key +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t2@dt=2014 +PREHOOK: Input: default@t3 +#### A masked pattern was here #### +POSTHOOK: query: select t3.c_int, b, t2.c_int, t1.c from (select key as a, c_int as b, t1.c_float as c from t1) t1 join t2 on t1.a=t2.key join t3 on t1.a=t3.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t2@dt=2014 +POSTHOOK: Input: default@t3 +#### A masked pattern was here #### +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 
1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 
1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +NULL NULL NULL NULL +NULL NULL NULL NULL +NULL NULL NULL NULL +NULL NULL NULL NULL +NULL NULL NULL NULL +NULL NULL NULL NULL +NULL NULL NULL NULL +NULL NULL NULL NULL +PREHOOK: query: select b, t1.c, t2.p, q, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1) t1 left outer join (select t2.key as p, t2.c_int as q, c_float as r from t2) t2 on t1.a=p join t3 on t1.a=key +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t2@dt=2014 +PREHOOK: Input: default@t3 +#### A masked pattern was here #### +POSTHOOK: query: select b, t1.c, t2.p, q, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1) t1 left outer join (select t2.key as p, t2.c_int as q, c_float as r from t2) t2 on t1.a=p join t3 on t1.a=key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t2@dt=2014 +POSTHOOK: Input: default@t3 +#### A masked pattern was here #### +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 
+1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 
1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +NULL NULL null NULL NULL +NULL NULL null NULL NULL +NULL NULL null NULL NULL +NULL NULL null NULL NULL +NULL NULL null NULL NULL +NULL NULL null NULL NULL +NULL NULL null NULL NULL +NULL NULL null NULL NULL +PREHOOK: query: select key, t1.c_int, t2.p, q from t1 join (select t2.key as p, t2.c_int as q, c_float as r from t2) t2 on t1.key=p left outer join (select key as a, c_int as b, t3.c_float as c from t3)t3 on t1.key=a +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t2@dt=2014 +PREHOOK: Input: default@t3 +#### A masked pattern was here #### +POSTHOOK: query: select key, t1.c_int, t2.p, q from t1 join (select t2.key as p, t2.c_int as q, c_float as r from t2) t2 on t1.key=p left outer join (select key as a, c_int as b, t3.c_float as c from t3)t3 on t1.key=a +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t2@dt=2014 +POSTHOOK: Input: default@t3 +#### A masked pattern was here #### + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 
1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 
1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +null NULL null NULL +null NULL null NULL +null NULL null NULL +null NULL null NULL +null NULL null NULL +null NULL null NULL +null NULL null NULL +null NULL null NULL +PREHOOK: query: select b, t1.c, t2.p, q, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1) t1 right outer join (select t2.key as p, t2.c_int as q, c_float as r from t2) t2 on t1.a=p join t3 on t1.a=key +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t2@dt=2014 +PREHOOK: Input: default@t3 +#### A masked pattern was here #### +POSTHOOK: query: select b, t1.c, t2.p, q, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1) t1 right outer join (select t2.key as p, t2.c_int as q, c_float as r from t2) t2 on t1.a=p join t3 on t1.a=key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t2@dt=2014 +POSTHOOK: Input: default@t3 +#### A masked pattern was here #### +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 
1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 
1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +NULL NULL null NULL NULL +NULL NULL null NULL NULL +NULL NULL null NULL NULL +NULL NULL null NULL NULL +NULL NULL null NULL NULL +NULL NULL null NULL NULL +NULL NULL null NULL NULL +NULL NULL null NULL NULL +PREHOOK: query: select key, t1.c_int, t2.p, q from t1 join (select t2.key as p, t2.c_int as q, c_float as r from t2) t2 on t1.key=p right outer join (select key as a, c_int as b, t3.c_float as c from t3)t3 on t1.key=a +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t2@dt=2014 +PREHOOK: Input: default@t3 +#### A masked pattern was here #### +POSTHOOK: query: select key, t1.c_int, t2.p, q from t1 join (select t2.key as p, t2.c_int as q, c_float as r from t2) t2 on t1.key=p right outer join (select key as a, c_int as b, t3.c_float as c from t3)t3 on t1.key=a +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t2@dt=2014 +POSTHOOK: Input: default@t3 +#### A masked pattern was here #### + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 
+1 1 1 1
[... identical rows elided ...]
+NULL NULL NULL NULL
+NULL NULL NULL NULL
+NULL NULL NULL NULL
+NULL NULL NULL NULL
+NULL NULL NULL NULL
+NULL NULL NULL NULL
+null NULL null NULL
+null NULL null NULL
+null NULL null NULL
+null NULL null NULL
+null NULL null NULL
+null NULL null NULL
+null NULL null NULL
+null NULL null NULL
+PREHOOK: query: select b, t1.c, t2.p, q, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1) t1 full outer join (select t2.key as p, t2.c_int as q, c_float as r from t2) t2 on t1.a=p join t3 on t1.a=key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t1@dt=2014
+PREHOOK: Input: default@t2
+PREHOOK: Input: default@t2@dt=2014
+PREHOOK: Input: default@t3
+#### A masked pattern was here ####
+POSTHOOK: query: select b, t1.c, t2.p, q, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1) t1 full outer join (select t2.key as p, t2.c_int as q, c_float as r from t2) t2 on t1.a=p join t3 on t1.a=key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1@dt=2014
+POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t2@dt=2014
+POSTHOOK: Input: default@t3
+#### A masked pattern was here ####
+1 1.0 1 1 1
[... identical rows elided ...]
+NULL NULL null NULL NULL
+NULL NULL null NULL NULL
+NULL NULL null NULL NULL
+NULL NULL null NULL NULL
+NULL NULL null NULL NULL
+NULL NULL null NULL NULL
+NULL NULL null NULL NULL
+NULL NULL null NULL NULL
+PREHOOK: query: select key, t1.c_int, t2.p, q from t1 join (select t2.key as p, t2.c_int as q, c_float as r from t2) t2 on t1.key=p full outer join (select key as a, c_int as b, t3.c_float as c from t3)t3 on t1.key=a
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t1@dt=2014
+PREHOOK: Input: default@t2
+PREHOOK: Input: default@t2@dt=2014
+PREHOOK: Input: default@t3
+#### A masked pattern was here ####
+POSTHOOK: query: select key, t1.c_int, t2.p, q from t1 join (select t2.key as p, t2.c_int as q, c_float as r from t2) t2 on t1.key=p full outer join (select key as a, c_int as b, t3.c_float as c from t3)t3 on t1.key=a
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1@dt=2014
+POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t2@dt=2014
+POSTHOOK: Input: default@t3
+#### A masked pattern was here ####
+ 1 1 1 1
[... identical rows elided ...]
+1 1 1 1
[... identical rows elided ...]
+NULL NULL NULL NULL
+NULL NULL NULL NULL
+NULL NULL NULL NULL
+NULL NULL NULL NULL
+NULL NULL NULL NULL
+NULL NULL NULL NULL
+null NULL null NULL
+null NULL null NULL
+null NULL null NULL
+null NULL null NULL
+null NULL null NULL
+null NULL null NULL
+null NULL null NULL
+null NULL null NULL
+PREHOOK: query: -- 5. Test Select + Join + FIL + TS
+select t1.c_int, t2.c_int from t1 join t2 on t1.key=t2.key where (t1.c_int + t2.c_int == 2) and (t1.c_int > 0 or t2.c_float >= 0)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t1@dt=2014
+PREHOOK: Input: default@t2
+PREHOOK: Input: default@t2@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: -- 5. Test Select + Join + FIL + TS
+select t1.c_int, t2.c_int from t1 join t2 on t1.key=t2.key where (t1.c_int + t2.c_int == 2) and (t1.c_int > 0 or t2.c_float >= 0)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1@dt=2014
+POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t2@dt=2014
+#### A masked pattern was here ####
+1 1
[... identical rows elided ...]
+PREHOOK: query: select t1.c_int, t2.c_int from t1 left outer join t2 on t1.key=t2.key where (t1.c_int + t2.c_int == 2) and (t1.c_int > 0 or t2.c_float >= 0)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t1@dt=2014
+PREHOOK: Input: default@t2
+PREHOOK: Input: default@t2@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: select t1.c_int, t2.c_int from t1 left outer join t2 on t1.key=t2.key where (t1.c_int + t2.c_int == 2) and (t1.c_int > 0 or t2.c_float >= 0)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1@dt=2014
+POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t2@dt=2014
+#### A masked pattern was here ####
+1 1
[... identical rows elided ...]
+PREHOOK: query: select t1.c_int, t2.c_int from t1 right outer join t2 on t1.key=t2.key where (t1.c_int + t2.c_int == 2) and (t1.c_int > 0 or t2.c_float >= 0)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t1@dt=2014
+PREHOOK: Input: default@t2
+PREHOOK: Input: default@t2@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: select t1.c_int, t2.c_int from t1 right outer join t2 on t1.key=t2.key where (t1.c_int + t2.c_int == 2) and (t1.c_int > 0 or t2.c_float >= 0)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1@dt=2014
+POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t2@dt=2014
+#### A masked pattern was here ####
+1 1
[... identical rows elided ...]
+PREHOOK: query: select t1.c_int, t2.c_int from t1 full outer join t2 on t1.key=t2.key where (t1.c_int + t2.c_int == 2) and (t1.c_int > 0 or t2.c_float >= 0)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t1@dt=2014
+PREHOOK: Input: default@t2
+PREHOOK: Input: default@t2@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: select t1.c_int, t2.c_int from t1 full outer join t2 on t1.key=t2.key where (t1.c_int + t2.c_int == 2) and (t1.c_int > 0 or t2.c_float >= 0)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1@dt=2014
+POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t2@dt=2014
+#### A masked pattern was here ####
+1 1
[... identical rows elided ...]
+PREHOOK: query: select b, t1.c, t2.p, q, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or t2.q >= 0)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t1@dt=2014
+PREHOOK: Input: default@t2
+PREHOOK: Input: default@t2@dt=2014
+PREHOOK: Input: default@t3
+#### A masked pattern was here ####
+POSTHOOK: query: select b, t1.c, t2.p, q, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or t2.q >= 0)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1@dt=2014
+POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t2@dt=2014
+POSTHOOK: Input: default@t3
+#### A masked pattern was here ####
+1 1.0 1 1 1
[... identical rows elided ...]
+PREHOOK: query: select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 left outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t1@dt=2014
+PREHOOK: Input: default@t2
+PREHOOK: Input: default@t2@dt=2014
+PREHOOK: Input: default@t3
+#### A masked pattern was here ####
+POSTHOOK: query: select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 left outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1@dt=2014
+POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t2@dt=2014
+POSTHOOK: Input: default@t3
+#### A masked pattern was here ####
+1 1 1 1.0 1
[... identical rows elided ...]
+PREHOOK: query: select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 right outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t1@dt=2014
+PREHOOK: Input: default@t2
+PREHOOK: Input: default@t2@dt=2014
+PREHOOK: Input: default@t3
+#### A masked pattern was here ####
+POSTHOOK: query: select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 right outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1@dt=2014
+POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t2@dt=2014
+POSTHOOK: Input: default@t3
+#### A masked pattern was here ####
+1 1 1 1.0 1
[... identical rows elided ...]
+PREHOOK: query: select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 full outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t1@dt=2014
+PREHOOK: Input: default@t2
+PREHOOK: Input: default@t2@dt=2014
+PREHOOK: Input: default@t3
+#### A masked pattern was here ####
+POSTHOOK: query: select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 full outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1@dt=2014
+POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t2@dt=2014
+POSTHOOK: Input: default@t3
+#### A masked pattern was here ####
+1 1 1 1.0 1
[... identical rows elided ...]
+PREHOOK: query: select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 full outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t1@dt=2014
+PREHOOK: Input: default@t2
+PREHOOK: Input: default@t2@dt=2014
+PREHOOK: Input: default@t3
+#### A masked pattern was here ####
+POSTHOOK: query: select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 full outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1@dt=2014
+POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t2@dt=2014
+POSTHOOK: Input: default@t3
+#### A masked pattern was here ####
+1 1 1 1.0 1
[... identical rows elided ...]
+PREHOOK: query: select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 left outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p left outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t1@dt=2014
+PREHOOK: Input: default@t2
+PREHOOK: Input: default@t2@dt=2014
+PREHOOK: Input: default@t3
+#### A masked pattern was here ####
+POSTHOOK: query: select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 left outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p left outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1@dt=2014
+POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t2@dt=2014
+POSTHOOK: Input: default@t3
+#### A masked pattern was here ####
+1 1 1 1.0 1
[... identical rows elided ...]
+PREHOOK: query: select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 left outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p right outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t1@dt=2014
+PREHOOK: Input: default@t2
+PREHOOK: Input: default@t2@dt=2014
+PREHOOK: Input: default@t3
+#### A masked pattern was here ####
+POSTHOOK: query: select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 left outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p right outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1@dt=2014
+POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t2@dt=2014
+POSTHOOK: Input: default@t3
+#### A masked pattern was here ####
+1 1 1 1.0 1
[... identical rows "+1 1 1 1.0 1" elided ...]
+PREHOOK: query: select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 left outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p full outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t1@dt=2014
+PREHOOK: Input: default@t2
+PREHOOK: Input: default@t2@dt=2014
+PREHOOK: Input: default@t3
+#### A masked pattern was here ####
+POSTHOOK: query: select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 left outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p full outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1@dt=2014
+POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t2@dt=2014
+POSTHOOK: Input: default@t3
+#### A masked pattern was here ####
+1 1 1 1.0 1
[... identical rows "+1 1 1 1.0 1" elided ...]
+PREHOOK: query: select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 right outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p right outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t1@dt=2014
+PREHOOK: Input: default@t2
+PREHOOK: Input: default@t2@dt=2014
+PREHOOK: Input: default@t3
+#### A masked pattern was here ####
+POSTHOOK: query: select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 right outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p right outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1@dt=2014
+POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t2@dt=2014
+POSTHOOK: Input: default@t3
+#### A masked pattern was here ####
+1 1 1 1.0 1
[... identical rows "+1 1 1 1.0 1" elided ...]
+PREHOOK: query: select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 right outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p left outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t1@dt=2014
+PREHOOK: Input: default@t2
+PREHOOK: Input: default@t2@dt=2014
+PREHOOK: Input: default@t3
+#### A masked pattern was here ####
+POSTHOOK: query: select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 right outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p left outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1@dt=2014
+POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t2@dt=2014
+POSTHOOK: Input: default@t3
+#### A masked pattern was here ####
+1 1 1 1.0 1
[... identical rows "+1 1 1 1.0 1" elided ...]
+PREHOOK: query: select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 right outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p full outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t1@dt=2014
+PREHOOK: Input: default@t2
+PREHOOK: Input: default@t2@dt=2014
+PREHOOK: Input: default@t3
+#### A masked pattern was here ####
+POSTHOOK: query: select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 right outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p full outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1@dt=2014
+POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t2@dt=2014
+POSTHOOK: Input: default@t3
+#### A masked pattern was here ####
+1 1 1 1.0 1
[... identical rows "+1 1 1 1.0 1" elided ...]
+PREHOOK: query: select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 full outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p full outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t1@dt=2014
+PREHOOK: Input: default@t2
+PREHOOK: Input: default@t2@dt=2014
+PREHOOK: Input: default@t3
+#### A masked pattern was here ####
+POSTHOOK: query: select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 full outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p full outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1@dt=2014
+POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t2@dt=2014
+POSTHOOK: Input: default@t3
+#### A masked pattern was here ####
+1 1 1 1.0 1
[... identical rows "+1 1 1 1.0 1" elided ...]
+PREHOOK: query: select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 full outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p left outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t1@dt=2014
+PREHOOK: Input: default@t2
+PREHOOK: Input: default@t2@dt=2014
+PREHOOK: Input: default@t3
+#### A masked pattern was here ####
+POSTHOOK: query: select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 full outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p left outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1@dt=2014
+POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t2@dt=2014
+POSTHOOK: Input: default@t3
+#### A masked pattern was here ####
+1 1 1 1.0 1
[... identical rows "+1 1 1 1.0 1" elided ...]
1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 
1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +PREHOOK: query: select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 full outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p right outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0) +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t2@dt=2014 +PREHOOK: Input: default@t3 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 full outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p right outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t2@dt=2014 +POSTHOOK: Input: default@t3 +#### A masked pattern was here #### +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 
1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 
+1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +PREHOOK: query: -- 6. Test Select + TS + Join + Fil + GB + GB Having +select * from t1 group by c_int +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: -- 6. 
Test Select + TS + Join + Fil + GB + GB Having
+select * from t1 group by c_int
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1@dt=2014
+#### A masked pattern was here ####
+NULL
+1
+PREHOOK: query: select key, (c_int+1)+2 as x, sum(c_int) from t1 group by c_float, t1.c_int, key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t1@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: select key, (c_int+1)+2 as x, sum(c_int) from t1 group by c_float, t1.c_int, key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1@dt=2014
+#### A masked pattern was here ####
+null NULL NULL
+ 1 4 2
+ 1 4 2
+1 4 12
+1 4 2
+PREHOOK: query: select * from (select key, (c_int+c_float+1+2) as x, sum(c_int) as y from t1 group by c_float, t1.c_int, key) R group by y, x
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t1@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: select * from (select key, (c_int+c_float+1+2) as x, sum(c_int) as y from t1 group by c_float, t1.c_int, key) R group by y, x
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1@dt=2014
+#### A masked pattern was here ####
+NULL NULL
+2 5.0
+12 5.0
+PREHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key order by a) t1 join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key order by q/10 desc, r asc) t2 on t1.a=p join t3 on t1.a=key where (b + t2.q >= 0) and (b > 0 or c_int >= 0) group by t3.c_int, c order by t3.c_int+c desc, c
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t1@dt=2014
+PREHOOK: Input: default@t2
+PREHOOK: Input: default@t2@dt=2014
+PREHOOK: Input: default@t3
+#### A masked pattern was here ####
+POSTHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key order by a) t1 join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key order by q/10 desc, r asc) t2 on t1.a=p join t3 on t1.a=key where (b + t2.q >= 0) and (b > 0 or c_int >= 0) group by t3.c_int, c order by t3.c_int+c desc, c
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1@dt=2014
+POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t2@dt=2014
+POSTHOOK: Input: default@t3
+#### A masked pattern was here ####
+1 12
+1 2
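
For orientation when scanning these golden blocks: every query in this group follows one template, namely filter and pre-aggregate t1 and t2 in subqueries, join the aggregates to t3 on the original key, then group the join output again. A minimal sketch of that shape, reusing the t1/t3 schemas implied by the queries themselves (illustrative only, not one of the golden queries):

    select t3.c_int, count(*)
    from (select key, sum(c_int) as s
          from t1
          where c_int > 0          -- row filter applied below the aggregate
          group by key) a
    join t3 on a.key = t3.key      -- join on the pre-aggregated key
    group by t3.c_int;             -- re-group after the join
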
+PREHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key having t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by b % c asc, b desc) t1 left outer join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key having t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0) t2 on t1.a=p left outer join t3 on t1.a=key where (b + t2.q >= 0) and (b > 0 or c_int >= 0) group by t3.c_int, c having t3.c_int > 0 and (c_int >=1 or c >= 1) and (c_int + c) >= 0 order by t3.c_int % c asc, t3.c_int desc
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t1@dt=2014
+PREHOOK: Input: default@t2
+PREHOOK: Input: default@t2@dt=2014
+PREHOOK: Input: default@t3
+#### A masked pattern was here ####
+POSTHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key having t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by b % c asc, b desc) t1 left outer join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key having t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0) t2 on t1.a=p left outer join t3 on t1.a=key where (b + t2.q >= 0) and (b > 0 or c_int >= 0) group by t3.c_int, c having t3.c_int > 0 and (c_int >=1 or c >= 1) and (c_int + c) >= 0 order by t3.c_int % c asc, t3.c_int desc
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1@dt=2014
+POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t2@dt=2014
+POSTHOOK: Input: default@t3
+#### A masked pattern was here ####
+1 12
+1 2
+PREHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key having t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by b+c, a desc) t1 right outer join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key having t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0) t2 on t1.a=p right outer join t3 on t1.a=key where (b + t2.q >= 2) and (b > 0 or c_int >= 0) group by t3.c_int, c
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t1@dt=2014
+PREHOOK: Input: default@t2
+PREHOOK: Input: default@t2@dt=2014
+PREHOOK: Input: default@t3
+#### A masked pattern was here ####
+POSTHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key having t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by b+c, a desc) t1 right outer join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key having t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0) t2 on t1.a=p right outer join t3 on t1.a=key where (b + t2.q >= 2) and (b > 0 or c_int >= 0) group by t3.c_int, c
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1@dt=2014
+POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t2@dt=2014
+POSTHOOK: Input: default@t3
+#### A masked pattern was here ####
+1 2
+1 12
+PREHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key having t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by c+a desc) t1 full outer join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key having t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by p+q desc, r asc) t2 on t1.a=p full outer join t3 on t1.a=key where (b + t2.q >= 0) and (b > 0 or c_int >= 0) group by t3.c_int, c having t3.c_int > 0 and (c_int >=1 or c >= 1) and (c_int + c) >= 0 order by t3.c_int
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t1@dt=2014
+PREHOOK: Input: default@t2
+PREHOOK: Input: default@t2@dt=2014
+PREHOOK: Input: default@t3
+#### A masked pattern was here ####
+POSTHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key having t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by c+a desc) t1 full outer join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key having t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by p+q desc, r asc) t2 on t1.a=p full outer join t3 on t1.a=key where (b + t2.q >= 0) and (b > 0 or c_int >= 0) group by t3.c_int, c having t3.c_int > 0 and (c_int >=1 or c >= 1) and (c_int + c) >= 0 order by t3.c_int
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1@dt=2014
+POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t2@dt=2014
+POSTHOOK: Input: default@t3
+#### A masked pattern was here ####
+1 12
+1 2
+PREHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key having t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0) t1 join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key having t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0) t2 on t1.a=p join t3 on t1.a=key where (b + t2.q >= 0) and (b > 0 or c_int >= 0) group by t3.c_int, c
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t1@dt=2014
+PREHOOK: Input: default@t2
+PREHOOK: Input: default@t2@dt=2014
+PREHOOK: Input: default@t3
+#### A masked pattern was here ####
+POSTHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key having t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0) t1 join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key having t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0) t2 on t1.a=p join t3 on t1.a=key where (b + t2.q >= 0) and (b > 0 or c_int >= 0) group by t3.c_int, c
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1@dt=2014
+POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t2@dt=2014
+POSTHOOK: Input: default@t3
+#### A masked pattern was here ####
+1 2
+1 12
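
The blocks above extend that template with HAVING at every level: each subquery filters its own aggregate, and the outer query filters the re-grouped result again. The core pattern in isolation, as a sketch against the same implied t1 schema (not a golden query):

    select key, sum(c_int) as c
    from t1
    where c_float >= 0             -- row filter, applied before grouping
    group by key
    having sum(c_int) >= 0;        -- group filter, applied after aggregation
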
+PREHOOK: query: -- 7. Test Select + TS + Join + Fil + GB + GB Having + Limit
+select * from t1 group by c_int limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t1@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: -- 7. Test Select + TS + Join + Fil + GB + GB Having + Limit
+select * from t1 group by c_int limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1@dt=2014
+#### A masked pattern was here ####
+NULL
+PREHOOK: query: select key, (c_int+1)+2 as x, sum(c_int) from t1 group by c_float, t1.c_int, key order by x limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t1@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: select key, (c_int+1)+2 as x, sum(c_int) from t1 group by c_float, t1.c_int, key order by x limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1@dt=2014
+#### A masked pattern was here ####
+null NULL NULL
+PREHOOK: query: select * from (select key, (c_int+c_float+1+2) as x, sum(c_int) as y from t1 group by c_float, t1.c_int, key) R group by y, x order by x,y limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t1@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: select * from (select key, (c_int+c_float+1+2) as x, sum(c_int) as y from t1 group by c_float, t1.c_int, key) R group by y, x order by x,y limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1@dt=2014
+#### A masked pattern was here ####
+NULL NULL
+PREHOOK: query: select key from(select key from (select key from t1 limit 5)t2 limit 5)t3 limit 5
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t1@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: select key from(select key from (select key from t1 limit 5)t2 limit 5)t3 limit 5
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1@dt=2014
+#### A masked pattern was here ####
+1
+1
+1
+1
+1
+PREHOOK: query: select key, c_int from(select key, c_int from (select key, c_int from t1 order by c_int limit 5)t1 order by c_int limit 5)t2 order by c_int limit 5
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t1@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: select key, c_int from(select key, c_int from (select key, c_int from t1 order by c_int limit 5)t1 order by c_int limit 5)t2 order by c_int limit 5
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1@dt=2014
+#### A masked pattern was here ####
+null NULL
+null NULL
+1 1
+1 1
+1 1
+PREHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key order by a limit 5) t1 join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key order by q/10 desc, r asc limit 5) t2 on t1.a=p join t3 on t1.a=key where (b + t2.q >= 0) and (b > 0 or c_int >= 0) group by t3.c_int, c order by t3.c_int+c desc, c limit 5
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t1@dt=2014
+PREHOOK: Input: default@t2
+PREHOOK: Input: default@t2@dt=2014
+PREHOOK: Input: default@t3
+#### A masked pattern was here ####
+POSTHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key order by a limit 5) t1 join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key order by q/10 desc, r asc limit 5) t2 on t1.a=p join t3 on t1.a=key where (b + t2.q >= 0) and (b > 0 or c_int >= 0) group by t3.c_int, c order by t3.c_int+c desc, c limit 5
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1@dt=2014
+POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t2@dt=2014
+POSTHOOK: Input: default@t3
+#### A masked pattern was here ####
+1 12
+1 2
+PREHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key having t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by b % c asc, b desc limit 5) t1 left outer join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key having t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 limit 5) t2 on t1.a=p left outer join t3 on t1.a=key where (b + t2.q >= 0) and (b > 0 or c_int >= 0) group by t3.c_int, c having t3.c_int > 0 and (c_int >=1 or c >= 1) and (c_int + c) >= 0 order by t3.c_int % c asc, t3.c_int desc limit 5
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t1@dt=2014
+PREHOOK: Input: default@t2
+PREHOOK: Input: default@t2@dt=2014
+PREHOOK: Input: default@t3
+#### A masked pattern was here ####
+POSTHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key having t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by b % c asc, b desc limit 5) t1 left outer join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key having t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 limit 5) t2 on t1.a=p left outer join t3 on t1.a=key where (b + t2.q >= 0) and (b > 0 or c_int >= 0) group by t3.c_int, c having t3.c_int > 0 and (c_int >=1 or c >= 1) and (c_int + c) >= 0 order by t3.c_int % c asc, t3.c_int desc limit 5
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1@dt=2014
+POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t2@dt=2014
+POSTHOOK: Input: default@t3
+#### A masked pattern was here ####
+1 12
+1 2
+PREHOOK: query: -- 8. Test UDF/UDAF
+select count(*), count(c_int), sum(c_int), avg(c_int), max(c_int), min(c_int) from t1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t1@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: -- 8. Test UDF/UDAF
+select count(*), count(c_int), sum(c_int), avg(c_int), max(c_int), min(c_int) from t1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1@dt=2014
+#### A masked pattern was here ####
+20 18 18 1.0 1 1
+PREHOOK: query: select count(*), count(c_int), sum(c_int), avg(c_int), max(c_int), min(c_int), case c_int when 0 then 1 when 1 then 2 else 3 end, sum(case c_int when 0 then 1 when 1 then 2 else 3 end) from t1 group by c_int
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t1@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*), count(c_int), sum(c_int), avg(c_int), max(c_int), min(c_int), case c_int when 0 then 1 when 1 then 2 else 3 end, sum(case c_int when 0 then 1 when 1 then 2 else 3 end) from t1 group by c_int
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1@dt=2014
+#### A masked pattern was here ####
+2 0 NULL NULL NULL NULL 3 6
+18 18 18 1.0 1 1 2 36
+PREHOOK: query: select * from (select count(*) as a, count(distinct c_int) as b, sum(c_int) as c, avg(c_int) as d, max(c_int) as e, min(c_int) as f from t1) t1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t1@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: select * from (select count(*) as a, count(distinct c_int) as b, sum(c_int) as c, avg(c_int) as d, max(c_int) as e, min(c_int) as f from t1) t1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1@dt=2014
+#### A masked pattern was here ####
+20 1 18 1.0 1 1
+PREHOOK: query: select * from (select count(*) as a, count(distinct c_int) as b, sum(c_int) as c, avg(c_int) as d, max(c_int) as e, min(c_int) as f, case c_int when 0 then 1 when 1 then 2 else 3 end as g, sum(case c_int when 0 then 1 when 1 then 2 else 3 end) as h from t1 group by c_int) t1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t1@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: select * from (select count(*) as a, count(distinct c_int) as b, sum(c_int) as c, avg(c_int) as d, max(c_int) as e, min(c_int) as f, case c_int when 0 then 1 when 1 then 2 else 3 end as g, sum(case c_int when 0 then 1 when 1 then 2 else 3 end) as h from t1 group by c_int) t1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1@dt=2014
+#### A masked pattern was here ####
+2 0 NULL NULL NULL NULL 3 6
+18 1 18 1.0 1 1 2 36
+PREHOOK: query: select f,a,e,b from (select count(*) as a, count(c_int) as b, sum(c_int) as c, avg(c_int) as d, max(c_int) as e, min(c_int) as f from t1) t1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t1@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: select f,a,e,b from (select count(*) as a, count(c_int) as b, sum(c_int) as c, avg(c_int) as d, max(c_int) as e, min(c_int) as f from t1) t1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1@dt=2014
+#### A masked pattern was here ####
+1 20 1 18
+PREHOOK: query: select f,a,e,b from (select count(*) as a, count(distinct c_int) as b, sum(distinct c_int) as c, avg(distinct c_int) as d, max(distinct c_int) as e, min(distinct c_int) as f from t1) t1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t1@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: select f,a,e,b from (select count(*) as a, count(distinct c_int) as b, sum(distinct c_int) as c, avg(distinct c_int) as d, max(distinct c_int) as e, min(distinct c_int) as f from t1) t1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1@dt=2014
+#### A masked pattern was here ####
+1 20 1 1
+PREHOOK: query: select count(c_int) as a, avg(c_float), key from t1 group by key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t1@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: select count(c_int) as a, avg(c_float), key from t1 group by key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1@dt=2014
+#### A masked pattern was here ####
+2 1.0 1
+2 1.0 1
+12 1.0 1
+2 1.0 1
+0 NULL null
+PREHOOK: query: select count(distinct c_int) as a, avg(c_float) from t1 group by c_float
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t1@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: select count(distinct c_int) as a, avg(c_float) from t1 group by c_float
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1@dt=2014
+#### A masked pattern was here ####
+0 NULL
+1 1.0
+PREHOOK: query: select count(distinct c_int) as a, avg(c_float) from t1 group by c_int
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t1@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: select count(distinct c_int) as a, avg(c_float) from t1 group by c_int
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1@dt=2014
+#### A masked pattern was here ####
+0 NULL
+1 1.0
+PREHOOK: query: select count(distinct c_int) as a, avg(c_float) from t1 group by c_float, c_int
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t1@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: select count(distinct c_int) as a, avg(c_float) from t1 group by c_float, c_int
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1@dt=2014
+#### A masked pattern was here ####
+0 NULL
+1 1.0
+PREHOOK: query: -- 9. Test Windowing Functions
+select count(c_int) over() from t1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t1@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: -- 9. 
Test Windowing Functions +select count(c_int) over() from t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +PREHOOK: query: select count(c_int) over(), sum(c_float) over(), max(c_int) over(), min(c_int) over(), row_number() over(), rank() over(), dense_rank() over(), percent_rank() over(), lead(c_int, 2, c_int) over(), lag(c_float, 2, c_float) over() from t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select count(c_int) over(), sum(c_float) over(), max(c_int) over(), min(c_int) over(), row_number() over(), rank() over(), dense_rank() over(), percent_rank() over(), lead(c_int, 2, c_int) over(), lag(c_float, 2, c_float) over() from t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +18 18.0 1 1 1 1 1 0.0 1 NULL +18 18.0 1 1 2 1 1 0.0 1 NULL +18 18.0 1 1 3 1 1 0.0 1 NULL +18 18.0 1 1 4 1 1 0.0 1 NULL +18 18.0 1 1 5 1 1 0.0 1 1.0 +18 18.0 1 1 6 1 1 0.0 1 1.0 +18 18.0 1 1 7 1 1 0.0 1 1.0 +18 18.0 1 1 8 1 1 0.0 1 1.0 +18 18.0 1 1 9 1 1 0.0 1 1.0 +18 18.0 1 1 10 1 1 0.0 1 1.0 +18 18.0 1 1 11 1 1 0.0 1 1.0 +18 18.0 1 1 12 1 1 0.0 1 1.0 +18 18.0 1 1 13 1 1 0.0 1 1.0 +18 18.0 1 1 14 1 1 0.0 1 1.0 +18 18.0 1 1 15 1 1 0.0 1 1.0 +18 18.0 1 1 16 1 1 0.0 1 1.0 +18 18.0 1 1 17 1 1 0.0 1 1.0 +18 18.0 1 1 18 1 1 0.0 1 1.0 +18 18.0 1 1 19 1 1 0.0 1 1.0 +18 18.0 1 1 20 1 1 0.0 1 1.0 +PREHOOK: query: select * from (select count(c_int) over(), sum(c_float) over(), max(c_int) over(), min(c_int) over(), row_number() over(), rank() over(), dense_rank() over(), percent_rank() over(), lead(c_int, 2, c_int) over(), lag(c_float, 2, c_float) over() from t1) t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select count(c_int) over(), sum(c_float) over(), max(c_int) over(), min(c_int) over(), row_number() over(), rank() over(), dense_rank() over(), percent_rank() over(), lead(c_int, 2, c_int) over(), lag(c_float, 2, c_float) over() from t1) t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +18 18.0 1 1 1 1 1 0.0 1 NULL +18 18.0 1 1 2 1 1 0.0 1 NULL +18 18.0 1 1 3 1 1 0.0 1 NULL +18 18.0 1 1 4 1 1 0.0 1 NULL +18 18.0 1 1 5 1 1 0.0 1 1.0 +18 18.0 1 1 6 1 1 0.0 1 1.0 +18 18.0 1 1 7 1 1 0.0 1 1.0 +18 18.0 1 1 8 1 1 0.0 1 1.0 +18 18.0 1 1 9 1 1 0.0 1 1.0 +18 18.0 1 1 10 1 1 0.0 1 1.0 +18 18.0 1 1 11 1 1 0.0 1 1.0 +18 18.0 1 1 12 1 1 0.0 1 1.0 +18 18.0 1 1 13 1 1 0.0 1 1.0 +18 18.0 1 1 14 1 1 0.0 1 1.0 +18 18.0 1 1 15 1 1 0.0 1 1.0 +18 18.0 1 1 16 1 1 0.0 1 1.0 +18 18.0 1 1 17 1 1 0.0 1 1.0 +18 18.0 1 1 18 1 1 0.0 1 1.0 +18 18.0 1 1 19 1 1 0.0 1 1.0 +18 18.0 1 1 20 1 1 0.0 1 1.0 +PREHOOK: query: select x from (select count(c_int) over() as x, sum(c_float) over() from t1) t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select x from (select count(c_int) over() as x, sum(c_float) over() from t1) t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +PREHOOK: query: 
select 1+sum(c_int) over() from t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select 1+sum(c_int) over() from t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +19 +19 +19 +19 +19 +19 +19 +19 +19 +19 +19 +19 +19 +19 +19 +19 +19 +19 +19 +19 +PREHOOK: query: select sum(c_int)+sum(sum(c_int)) over() from t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select sum(c_int)+sum(sum(c_int)) over() from t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +36 +PREHOOK: query: select * from (select max(c_int) over (partition by key order by value Rows UNBOUNDED PRECEDING), min(c_int) over (partition by key order by value rows current row), count(c_int) over(partition by key order by value ROWS 1 PRECEDING), avg(value) over (partition by key order by value Rows between unbounded preceding and unbounded following), sum(value) over (partition by key order by value rows between unbounded preceding and current row), avg(c_float) over (partition by key order by value Rows between 1 preceding and unbounded following), sum(c_float) over (partition by key order by value rows between 1 preceding and current row), max(c_float) over (partition by key order by value rows between 1 preceding and unbounded following), min(c_float) over (partition by key order by value rows between 1 preceding and 1 following) from t1) t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select max(c_int) over (partition by key order by value Rows UNBOUNDED PRECEDING), min(c_int) over (partition by key order by value rows current row), count(c_int) over(partition by key order by value ROWS 1 PRECEDING), avg(value) over (partition by key order by value Rows between unbounded preceding and unbounded following), sum(value) over (partition by key order by value rows between unbounded preceding and current row), avg(c_float) over (partition by key order by value Rows between 1 preceding and unbounded following), sum(c_float) over (partition by key order by value rows between 1 preceding and current row), max(c_float) over (partition by key order by value rows between 1 preceding and unbounded following), min(c_float) over (partition by key order by value rows between 1 preceding and 1 following) from t1) t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +1 1 1 1.0 1.0 1.0 1.0 1.0 1.0 +1 1 2 1.0 2.0 1.0 2.0 1.0 1.0 +1 1 1 1.0 1.0 1.0 1.0 1.0 1.0 +1 1 2 1.0 2.0 1.0 2.0 1.0 1.0 +1 1 1 1.0 1.0 1.0 1.0 1.0 1.0 +1 1 2 1.0 2.0 1.0 2.0 1.0 1.0 +1 1 2 1.0 3.0 1.0 2.0 1.0 1.0 +1 1 2 1.0 4.0 1.0 2.0 1.0 1.0 +1 1 2 1.0 5.0 1.0 2.0 1.0 1.0 +1 1 2 1.0 6.0 1.0 2.0 1.0 1.0 +1 1 2 1.0 7.0 1.0 2.0 1.0 1.0 +1 1 2 1.0 8.0 1.0 2.0 1.0 1.0 +1 1 2 1.0 9.0 1.0 2.0 1.0 1.0 +1 1 2 1.0 10.0 1.0 2.0 1.0 1.0 +1 1 2 1.0 11.0 1.0 2.0 1.0 1.0 +1 1 2 1.0 12.0 1.0 2.0 1.0 1.0 +1 1 1 1.0 1.0 1.0 1.0 1.0 1.0 +1 1 2 1.0 2.0 1.0 2.0 1.0 1.0 +NULL NULL 0 NULL 0.0 NULL NULL NULL NULL +NULL NULL 0 NULL 0.0 NULL NULL NULL NULL +PREHOOK: query: select i, a, h, b, c, d, e, f, g, a as x, a +1 as y from (select max(c_int) over (partition by key order by value 
range UNBOUNDED PRECEDING) a, min(c_int) over (partition by key order by value range current row) b, count(c_int) over(partition by key order by value range 1 PRECEDING) c, avg(value) over (partition by key order by value range between unbounded preceding and unbounded following) d, sum(value) over (partition by key order by value range between unbounded preceding and current row) e, avg(c_float) over (partition by key order by value range between 1 preceding and unbounded following) f, sum(c_float) over (partition by key order by value range between 1 preceding and current row) g, max(c_float) over (partition by key order by value range between 1 preceding and unbounded following) h, min(c_float) over (partition by key order by value range between 1 preceding and 1 following) i from t1) t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select i, a, h, b, c, d, e, f, g, a as x, a +1 as y from (select max(c_int) over (partition by key order by value range UNBOUNDED PRECEDING) a, min(c_int) over (partition by key order by value range current row) b, count(c_int) over(partition by key order by value range 1 PRECEDING) c, avg(value) over (partition by key order by value range between unbounded preceding and unbounded following) d, sum(value) over (partition by key order by value range between unbounded preceding and current row) e, avg(c_float) over (partition by key order by value range between 1 preceding and unbounded following) f, sum(c_float) over (partition by key order by value range between 1 preceding and current row) g, max(c_float) over (partition by key order by value range between 1 preceding and unbounded following) h, min(c_float) over (partition by key order by value range between 1 preceding and 1 following) i from t1) t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +1.0 1 1.0 1 2 1.0 2.0 1.0 2.0 1 2 +1.0 1 1.0 1 2 1.0 2.0 1.0 2.0 1 2 +1.0 1 1.0 1 2 1.0 2.0 1.0 2.0 1 2 +1.0 1 1.0 1 2 1.0 2.0 1.0 2.0 1 2 +1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2 +1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2 +1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2 +1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2 +1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2 +1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2 +1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2 +1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2 +1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2 +1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2 +1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2 +1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2 +1.0 1 1.0 1 2 1.0 2.0 1.0 2.0 1 2 +1.0 1 1.0 1 2 1.0 2.0 1.0 2.0 1 2 +NULL NULL NULL NULL 0 NULL 0.0 NULL NULL NULL NULL +NULL NULL NULL NULL 0 NULL 0.0 NULL NULL NULL NULL +PREHOOK: query: -- 10. Test views +create view v1 as select c_int, value, c_boolean, dt from t1 +PREHOOK: type: CREATEVIEW +PREHOOK: Input: default@t1 +PREHOOK: Output: database:default +PREHOOK: Output: default@v1 +POSTHOOK: query: -- 10. 
Test views
+create view v1 as select c_int, value, c_boolean, dt from t1
+POSTHOOK: type: CREATEVIEW
+POSTHOOK: Input: default@t1
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@v1
+PREHOOK: query: create view v2 as select c_int, value from t2
+PREHOOK: type: CREATEVIEW
+PREHOOK: Input: default@t2
+PREHOOK: Output: database:default
+PREHOOK: Output: default@v2
+POSTHOOK: query: create view v2 as select c_int, value from t2
+POSTHOOK: type: CREATEVIEW
+POSTHOOK: Input: default@t2
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@v2
+PREHOOK: query: select value from v1 where c_boolean=false
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t1@dt=2014
+PREHOOK: Input: default@v1
+#### A masked pattern was here ####
+POSTHOOK: query: select value from v1 where c_boolean=false
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1@dt=2014
+POSTHOOK: Input: default@v1
+#### A masked pattern was here ####
+1
+1
+PREHOOK: query: select max(c_int) from v1 group by (c_boolean)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t1@dt=2014
+PREHOOK: Input: default@v1
+#### A masked pattern was here ####
+POSTHOOK: query: select max(c_int) from v1 group by (c_boolean)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1@dt=2014
+POSTHOOK: Input: default@v1
+#### A masked pattern was here ####
+NULL
+1
+1
+PREHOOK: query: select count(v1.c_int) from v1 join t2 on v1.c_int = t2.c_int
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t1@dt=2014
+PREHOOK: Input: default@t2
+PREHOOK: Input: default@t2@dt=2014
+PREHOOK: Input: default@v1
+#### A masked pattern was here ####
+POSTHOOK: query: select count(v1.c_int) from v1 join t2 on v1.c_int = t2.c_int
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1@dt=2014
+POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t2@dt=2014
+POSTHOOK: Input: default@v1
+#### A masked pattern was here ####
+234
+PREHOOK: query: select count(v1.c_int) from v1 join v2 on v1.c_int = v2.c_int
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t1@dt=2014
+PREHOOK: Input: default@t2
+PREHOOK: Input: default@t2@dt=2014
+PREHOOK: Input: default@v1
+PREHOOK: Input: default@v2
+#### A masked pattern was here ####
+POSTHOOK: query: select count(v1.c_int) from v1 join v2 on v1.c_int = v2.c_int
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1@dt=2014
+POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t2@dt=2014
+POSTHOOK: Input: default@v1
+POSTHOOK: Input: default@v2
+#### A masked pattern was here ####
+234
+PREHOOK: query: select count(*) from v1 a join v1 b on a.value = b.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t1@dt=2014
+PREHOOK: Input: default@v1
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from v1 a join v1 b on a.value = b.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1@dt=2014
+POSTHOOK: Input: default@v1
+#### A masked pattern was here ####
+160
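
Note how the PREHOOK/POSTHOOK Input lists for the view queries above name both the view (default@v1) and the partitioned base table behind it (default@t1 and default@t1@dt=2014): Hive expands a view into scans of its underlying tables at compile time. A sketch of the same effect, where v_bool is a hypothetical view name that is not part of this test:

    create view v_bool as
    select value from t1 where c_boolean = false;
    -- querying the view would list default@v_bool plus the t1 partitions as inputs
    select count(*) from v_bool;
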
Input: default@t1 +POSTHOOK: Input: default@v1 +POSTHOOK: Output: database:default +POSTHOOK: Output: default@v3 +PREHOOK: query: select count(val) from v3 where val != '1' +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@v1 +PREHOOK: Input: default@v3 +#### A masked pattern was here #### +POSTHOOK: query: select count(val) from v3 where val != '1' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@v1 +POSTHOOK: Input: default@v3 +#### A masked pattern was here #### +96 +PREHOOK: query: with q1 as ( select key from t1 where key = '1') +select count(*) from q1 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: with q1 as ( select key from t1 where key = '1') +select count(*) from q1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +12 +PREHOOK: query: with q1 as ( select value from v1 where c_boolean = false) +select count(value) from q1 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@v1 +#### A masked pattern was here #### +POSTHOOK: query: with q1 as ( select value from v1 where c_boolean = false) +select count(value) from q1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@v1 +#### A masked pattern was here #### +2 +PREHOOK: query: create view v4 as +with q1 as ( select key,c_int from t1 where key = '1') +select * from q1 +PREHOOK: type: CREATEVIEW +PREHOOK: Input: default@t1 +PREHOOK: Output: database:default +PREHOOK: Output: default@v4 +POSTHOOK: query: create view v4 as +with q1 as ( select key,c_int from t1 where key = '1') +select * from q1 +POSTHOOK: type: CREATEVIEW +POSTHOOK: Input: default@t1 +POSTHOOK: Output: database:default +POSTHOOK: Output: default@v4 +PREHOOK: query: with q1 as ( select c_int from q2 where c_boolean = false), +q2 as ( select c_int,c_boolean from v1 where value = '1') +select sum(c_int) from (select c_int from q1) a +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@v1 +#### A masked pattern was here #### +POSTHOOK: query: with q1 as ( select c_int from q2 where c_boolean = false), +q2 as ( select c_int,c_boolean from v1 where value = '1') +select sum(c_int) from (select c_int from q1) a +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@v1 +#### A masked pattern was here #### +2 +PREHOOK: query: with q1 as ( select t1.c_int c_int from q2 join t1 where q2.c_int = t1.c_int and t1.dt='2014'), +q2 as ( select c_int,c_boolean from v1 where value = '1' or dt = '14') +select count(*) from q1 join q2 join v4 on q1.c_int = q2.c_int and v4.c_int = q2.c_int +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@v1 +PREHOOK: Input: default@v4 +#### A masked pattern was here #### +POSTHOOK: query: with q1 as ( select t1.c_int c_int from q2 join t1 where q2.c_int = t1.c_int and t1.dt='2014'), +q2 as ( select c_int,c_boolean from v1 where value = '1' or dt = '14') +select count(*) from q1 join q2 join v4 on q1.c_int = q2.c_int and v4.c_int = q2.c_int +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@v1 +POSTHOOK: 
Input: default@v4 +#### A masked pattern was here #### +31104 +PREHOOK: query: drop view v1 +PREHOOK: type: DROPVIEW +PREHOOK: Input: default@v1 +PREHOOK: Output: default@v1 +POSTHOOK: query: drop view v1 +POSTHOOK: type: DROPVIEW +POSTHOOK: Input: default@v1 +POSTHOOK: Output: default@v1 +PREHOOK: query: drop view v2 +PREHOOK: type: DROPVIEW +PREHOOK: Input: default@v2 +PREHOOK: Output: default@v2 +POSTHOOK: query: drop view v2 +POSTHOOK: type: DROPVIEW +POSTHOOK: Input: default@v2 +POSTHOOK: Output: default@v2 +PREHOOK: query: drop view v3 +PREHOOK: type: DROPVIEW +PREHOOK: Input: default@v3 +PREHOOK: Output: default@v3 +POSTHOOK: query: drop view v3 +POSTHOOK: type: DROPVIEW +POSTHOOK: Input: default@v3 +POSTHOOK: Output: default@v3 +PREHOOK: query: drop view v4 +PREHOOK: type: DROPVIEW +PREHOOK: Input: default@v4 +PREHOOK: Output: default@v4 +POSTHOOK: query: drop view v4 +POSTHOOK: type: DROPVIEW +POSTHOOK: Input: default@v4 +POSTHOOK: Output: default@v4 +PREHOOK: query: -- 11. Union All +select * from t1 union all select * from t2 order by key +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t2@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: -- 11. Union All +select * from t1 union all select * from t2 order by key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t2@dt=2014 +#### A masked pattern was here #### + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 false 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 false 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +2 2 2 2.0 true 2014 +2 2 2 2.0 true 2014 +2 2 2 2.0 true 2014 +2 2 2 2.0 true 2014 +2 2 2 2.0 true 2014 +null null NULL NULL NULL 2014 +null null NULL NULL NULL 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 false 2014 +1 1 1 1.0 false 2014 +null null NULL NULL NULL 2014 +null null NULL NULL NULL 2014 +PREHOOK: query: select key from (select key, c_int from (select * from t1 union all select * from t2 where t2.key >=0)r1 union all select key, c_int from t3)r2 where key >=0 order by key +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t2@dt=2014 +PREHOOK: Input: default@t3 +#### A masked pattern was here #### +POSTHOOK: query: select key from (select key, c_int from (select * from t1 union all select * from t2 where t2.key >=0)r1 union all select key, c_int from t3)r2 where key >=0 order by key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t2@dt=2014 +POSTHOOK: Input: default@t3 +#### A masked pattern was here #### + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +2 +2 +2 +2 +2 +2 +2 +3 +3 +3 +PREHOOK: query: select r2.key from (select key, c_int from (select key, c_int from t1 union all select key, c_int from t3 )r1 union all 
select key, c_int from t3)r2 join (select key, c_int from (select * from t1 union all select * from t2 where t2.key >=0)r1 union all select key, c_int from t3)r3 on r2.key=r3.key where r3.key >=0 order by r2.key +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t2@dt=2014 +PREHOOK: Input: default@t3 +#### A masked pattern was here #### +POSTHOOK: query: select r2.key from (select key, c_int from (select key, c_int from t1 union all select key, c_int from t3 )r1 union all select key, c_int from t3)r2 join (select key, c_int from (select * from t1 union all select * from t2 where t2.key >=0)r1 union all select key, c_int from t3)r3 on r2.key=r3.key where r3.key >=0 order by r2.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t2@dt=2014 +POSTHOOK: Input: default@t3 +#### A masked pattern was here #### + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +PREHOOK: query: -- 12. 
SemiJoin +select t1.c_int from t1 left semi join t2 on t1.key=t2.key +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t2@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: -- 12. SemiJoin +select t1.c_int from t1 left semi join t2 on t1.key=t2.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t2@dt=2014 +#### A masked pattern was here #### +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +NULL +NULL +PREHOOK: query: select t1.c_int from t1 left semi join t2 on t1.key=t2.key where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0) +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t2@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select t1.c_int from t1 left semi join t2 on t1.key=t2.key where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t2@dt=2014 +#### A masked pattern was here #### +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +PREHOOK: query: select * from (select c, b, a from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 left semi join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p left semi join t3 on t1.a=key where (b + 1 == 2) and (b > 0 or c >= 0)) R where (b + 1 = 2) and (R.b > 0 or c >= 0) +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t2@dt=2014 +PREHOOK: Input: default@t3 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select c, b, a from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 left semi join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p left semi join t3 on t1.a=key where (b + 1 == 2) and (b > 0 or c >= 0)) R where (b + 1 = 2) and (R.b > 0 or c >= 0) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t2@dt=2014 +POSTHOOK: Input: default@t3 +#### A masked pattern was here #### +1.0 1 1 +1.0 1 1 +1.0 1 1 +1.0 1 1 +1.0 1 1 +1.0 1 1 +1.0 1 1 +1.0 1 1 +1.0 1 1 +1.0 1 1 +1.0 1 1 +1.0 1 1 +1.0 1 1 +1.0 1 1 +1.0 1 1 +1.0 1 1 +1.0 1 1 +1.0 1 1 +PREHOOK: query: select * from (select t3.c_int, t1.c, b from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 = 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 left semi join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p left outer join t3 on t1.a=key where (b + t3.c_int == 2) and (b > 0 or c_int >= 0)) R where (R.c_int + 1 = 2) and (R.b > 0 or c_int >= 0) +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t2@dt=2014 +PREHOOK: Input: default@t3 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select t3.c_int, t1.c, b from 
(select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 = 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 left semi join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p left outer join t3 on t1.a=key where (b + t3.c_int == 2) and (b > 0 or c_int >= 0)) R where (R.c_int + 1 = 2) and (R.b > 0 or c_int >= 0) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t2@dt=2014 +POSTHOOK: Input: default@t3 +#### A masked pattern was here #### +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +PREHOOK: query: select * from (select c_int, b, t1.c from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 left semi join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p right outer join t3 on t1.a=key where (b + 1 == 2) and (b > 0 or c_int >= 0)) R where (c + 1 = 2) and (R.b > 0 or c_int >= 0) +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t2@dt=2014 +PREHOOK: Input: default@t3 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select c_int, b, t1.c from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 left semi join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p right outer join t3 on t1.a=key where (b + 1 == 2) and (b > 0 or c_int >= 0)) R where (c + 1 = 2) and (R.b > 0 or c_int >= 0) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t2@dt=2014 +POSTHOOK: Input: default@t3 +#### A masked pattern was here #### +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +PREHOOK: query: select * from (select c_int, b, t1.c from 
(select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 left semi join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p full outer join t3 on t1.a=key where (b + 1 == 2) and (b > 0 or c_int >= 0)) R where (c + 1 = 2) and (R.b > 0 or c_int >= 0) +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t2@dt=2014 +PREHOOK: Input: default@t3 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select c_int, b, t1.c from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 left semi join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p full outer join t3 on t1.a=key where (b + 1 == 2) and (b > 0 or c_int >= 0)) R where (c + 1 = 2) and (R.b > 0 or c_int >= 0) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t2@dt=2014 +POSTHOOK: Input: default@t3 +#### A masked pattern was here #### +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +PREHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key having t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by a+b desc, c asc) t1 left semi join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key having t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by q+r/10 desc, p) t2 on t1.a=p left semi join t3 on t1.a=key where (b + 1 >= 0) and (b > 0 or a >= 0) group by a, c having a > 0 and (a >=1 or c >= 1) and (a + c) >= 0 order by c, a +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t2@dt=2014 +PREHOOK: Input: default@t3 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key having t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by a+b desc, c asc) t1 left semi join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key having t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by q+r/10 desc, 
p) t2 on t1.a=p left semi join t3 on t1.a=key where (b + 1 >= 0) and (b > 0 or a >= 0) group by a, c having a > 0 and (a >=1 or c >= 1) and (a + c) >= 0 order by c, a +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t2@dt=2014 +POSTHOOK: Input: default@t3 +#### A masked pattern was here #### + 1 2 + 1 2 +1 2 +1 12 +PREHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key having t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by a+b desc, c asc limit 5) t1 left semi join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key having t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by q+r/10 desc, p limit 5) t2 on t1.a=p left semi join t3 on t1.a=key where (b + 1 >= 0) and (b > 0 or a >= 0) group by a, c having a > 0 and (a >=1 or c >= 1) and (a + c) >= 0 order by c, a +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t2@dt=2014 +PREHOOK: Input: default@t3 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key having t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by a+b desc, c asc limit 5) t1 left semi join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key having t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by q+r/10 desc, p limit 5) t2 on t1.a=p left semi join t3 on t1.a=key where (b + 1 >= 0) and (b > 0 or a >= 0) group by a, c having a > 0 and (a >=1 or c >= 1) and (a + c) >= 0 order by c, a +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t2@dt=2014 +POSTHOOK: Input: default@t3 +#### A masked pattern was here #### + 1 2 + 1 2 +1 2 +1 12 +PREHOOK: query: -- 13. null expr in select list +select null from t3 +PREHOOK: type: QUERY +PREHOOK: Input: default@t3 +#### A masked pattern was here #### +POSTHOOK: query: -- 13. null expr in select list +select null from t3 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t3 +#### A masked pattern was here #### +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +PREHOOK: query: -- 14. unary operator +select key from t1 where c_int = -6 or c_int = +6 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: -- 14. unary operator +select key from t1 where c_int = -6 or c_int = +6 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +PREHOOK: query: -- 15. 
query referencing only partition columns +select count(t1.dt) from t1 join t2 on t1.dt = t2.dt where t1.dt = '2014' +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t2@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: -- 15. query referencing only partition columns +select count(t1.dt) from t1 join t2 on t1.dt = t2.dt where t1.dt = '2014' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t2@dt=2014 +#### A masked pattern was here #### +400 +PREHOOK: query: -- 16. SubQueries Not In +-- non agg, non corr +select * +from src_cbo +where src_cbo.key not in + ( select key from src_cbo s1 + where s1.key > '2' + ) +PREHOOK: type: QUERY +PREHOOK: Input: default@src_cbo +#### A masked pattern was here #### +POSTHOOK: query: -- 16. SubQueries Not In +-- non agg, non corr +select * +from src_cbo +where src_cbo.key not in + ( select key from src_cbo s1 + where s1.key > '2' + ) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src_cbo +#### A masked pattern was here #### +0 val_0 +0 val_0 +0 val_0 +10 val_10 +100 val_100 +100 val_100 +103 val_103 +103 val_103 +104 val_104 +104 val_104 +105 val_105 +11 val_11 +111 val_111 +113 val_113 +113 val_113 +114 val_114 +116 val_116 +118 val_118 +118 val_118 +119 val_119 +119 val_119 +119 val_119 +12 val_12 +12 val_12 +120 val_120 +120 val_120 +125 val_125 +125 val_125 +126 val_126 +128 val_128 +128 val_128 +128 val_128 +129 val_129 +129 val_129 +131 val_131 +133 val_133 +134 val_134 +134 val_134 +136 val_136 +137 val_137 +137 val_137 +138 val_138 +138 val_138 +138 val_138 +138 val_138 +143 val_143 +145 val_145 +146 val_146 +146 val_146 +149 val_149 +149 val_149 +15 val_15 +15 val_15 +150 val_150 +152 val_152 +152 val_152 +153 val_153 +155 val_155 +156 val_156 +157 val_157 +158 val_158 +160 val_160 +162 val_162 +163 val_163 +164 val_164 +164 val_164 +165 val_165 +165 val_165 +166 val_166 +167 val_167 +167 val_167 +167 val_167 +168 val_168 +169 val_169 +169 val_169 +169 val_169 +169 val_169 +17 val_17 +170 val_170 +172 val_172 +172 val_172 +174 val_174 +174 val_174 +175 val_175 +175 val_175 +176 val_176 +176 val_176 +177 val_177 +178 val_178 +179 val_179 +179 val_179 +18 val_18 +18 val_18 +180 val_180 +181 val_181 +183 val_183 +186 val_186 +187 val_187 +187 val_187 +187 val_187 +189 val_189 +19 val_19 +190 val_190 +191 val_191 +191 val_191 +192 val_192 +193 val_193 +193 val_193 +193 val_193 +194 val_194 +195 val_195 +195 val_195 +196 val_196 +197 val_197 +197 val_197 +199 val_199 +199 val_199 +199 val_199 +2 val_2 +PREHOOK: query: -- non agg, corr +select p_mfgr, b.p_name, p_size +from part b +where b.p_name not in + (select p_name + from (select p_mfgr, p_name, p_size as r from part) a + where r < 10 and b.p_mfgr = a.p_mfgr + ) +PREHOOK: type: QUERY +PREHOOK: Input: default@part +#### A masked pattern was here #### +POSTHOOK: query: -- non agg, corr +select p_mfgr, b.p_name, p_size +from part b +where b.p_name not in + (select p_name + from (select p_mfgr, p_name, p_size as r from part) a + where r < 10 and b.p_mfgr = a.p_mfgr + ) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@part +#### A masked pattern was here #### +Manufacturer#5 almond antique blue firebrick mint 31 +Manufacturer#3 almond antique chartreuse khaki white 17 +Manufacturer#1 almond antique chartreuse lavender yellow 34 +Manufacturer#3 almond antique forest lavender goldenrod 14 +Manufacturer#4 almond 
antique gainsboro frosted violet 10 +Manufacturer#3 almond antique metallic orange dim 19 +Manufacturer#3 almond antique olive coral navajo 45 +Manufacturer#2 almond antique violet chocolate turquoise 14 +Manufacturer#4 almond antique violet mint lemon 39 +Manufacturer#2 almond antique violet turquoise frosted 40 +Manufacturer#1 almond aquamarine burnished black steel 28 +Manufacturer#5 almond aquamarine dodger light gainsboro 46 +Manufacturer#4 almond aquamarine floral ivory bisque 27 +Manufacturer#1 almond aquamarine pink moccasin thistle 42 +Manufacturer#2 almond aquamarine rose maroon antique 25 +Manufacturer#2 almond aquamarine sandy cyan gainsboro 18 +Manufacturer#4 almond azure aquamarine papaya violet 12 +Manufacturer#5 almond azure blanched chiffon midnight 23 +PREHOOK: query: -- agg, non corr +select p_name, p_size +from +part where part.p_size not in + (select avg(p_size) + from (select p_size from part) a + where p_size < 10 + ) order by p_name +PREHOOK: type: QUERY +PREHOOK: Input: default@part +#### A masked pattern was here #### +POSTHOOK: query: -- agg, non corr +select p_name, p_size +from +part where part.p_size not in + (select avg(p_size) + from (select p_size from part) a + where p_size < 10 + ) order by p_name +POSTHOOK: type: QUERY +POSTHOOK: Input: default@part +#### A masked pattern was here #### +almond antique blue firebrick mint 31 +almond antique burnished rose metallic 2 +almond antique burnished rose metallic 2 +almond antique chartreuse khaki white 17 +almond antique chartreuse lavender yellow 34 +almond antique forest lavender goldenrod 14 +almond antique gainsboro frosted violet 10 +almond antique medium spring khaki 6 +almond antique metallic orange dim 19 +almond antique misty red olive 1 +almond antique olive coral navajo 45 +almond antique salmon chartreuse burlywood 6 +almond antique sky peru orange 2 +almond antique violet chocolate turquoise 14 +almond antique violet mint lemon 39 +almond antique violet turquoise frosted 40 +almond aquamarine burnished black steel 28 +almond aquamarine dodger light gainsboro 46 +almond aquamarine floral ivory bisque 27 +almond aquamarine midnight light salmon 2 +almond aquamarine pink moccasin thistle 42 +almond aquamarine rose maroon antique 25 +almond aquamarine sandy cyan gainsboro 18 +almond aquamarine yellow dodger mint 7 +almond azure aquamarine papaya violet 12 +almond azure blanched chiffon midnight 23 +PREHOOK: query: -- agg, corr +select p_mfgr, p_name, p_size +from part b where b.p_size not in + (select min(p_size) + from (select p_mfgr, p_size from part) a + where p_size < 10 and b.p_mfgr = a.p_mfgr + ) order by p_name +PREHOOK: type: QUERY +PREHOOK: Input: default@part +#### A masked pattern was here #### +POSTHOOK: query: -- agg, corr +select p_mfgr, p_name, p_size +from part b where b.p_size not in + (select min(p_size) + from (select p_mfgr, p_size from part) a + where p_size < 10 and b.p_mfgr = a.p_mfgr + ) order by p_name +POSTHOOK: type: QUERY +POSTHOOK: Input: default@part +#### A masked pattern was here #### +Manufacturer#5 almond antique blue firebrick mint 31 +Manufacturer#3 almond antique chartreuse khaki white 17 +Manufacturer#1 almond antique chartreuse lavender yellow 34 +Manufacturer#3 almond antique forest lavender goldenrod 14 +Manufacturer#4 almond antique gainsboro frosted violet 10 +Manufacturer#5 almond antique medium spring khaki 6 +Manufacturer#3 almond antique metallic orange dim 19 +Manufacturer#3 almond antique olive coral navajo 45 +Manufacturer#1 almond antique salmon 
chartreuse burlywood 6 +Manufacturer#2 almond antique violet chocolate turquoise 14 +Manufacturer#4 almond antique violet mint lemon 39 +Manufacturer#2 almond antique violet turquoise frosted 40 +Manufacturer#1 almond aquamarine burnished black steel 28 +Manufacturer#5 almond aquamarine dodger light gainsboro 46 +Manufacturer#4 almond aquamarine floral ivory bisque 27 +Manufacturer#1 almond aquamarine pink moccasin thistle 42 +Manufacturer#2 almond aquamarine rose maroon antique 25 +Manufacturer#2 almond aquamarine sandy cyan gainsboro 18 +Manufacturer#4 almond azure aquamarine papaya violet 12 +Manufacturer#5 almond azure blanched chiffon midnight 23 +PREHOOK: query: -- non agg, non corr, Group By in Parent Query +select li.l_partkey, count(*) +from lineitem li +where li.l_linenumber = 1 and + li.l_orderkey not in (select l_orderkey from lineitem where l_shipmode = 'AIR') +group by li.l_partkey +PREHOOK: type: QUERY +PREHOOK: Input: default@lineitem +#### A masked pattern was here #### +POSTHOOK: query: -- non agg, non corr, Group By in Parent Query +select li.l_partkey, count(*) +from lineitem li +where li.l_linenumber = 1 and + li.l_orderkey not in (select l_orderkey from lineitem where l_shipmode = 'AIR') +group by li.l_partkey +POSTHOOK: type: QUERY +POSTHOOK: Input: default@lineitem +#### A masked pattern was here #### +450 1 +7068 1 +21636 1 +22630 1 +59694 1 +61931 1 +85951 1 +88035 1 +88362 1 +106170 1 +119477 1 +119767 1 +123076 1 +139636 1 +175839 1 +182052 1 +PREHOOK: query: -- add null check test from sq_notin.q once HIVE-7721 resolved. + +-- non agg, corr, having +select b.p_mfgr, min(p_retailprice) +from part b +group by b.p_mfgr +having b.p_mfgr not in + (select p_mfgr + from (select p_mfgr, min(p_retailprice) l, max(p_retailprice) r, avg(p_retailprice) a from part group by p_mfgr) a + where min(p_retailprice) = l and r - l > 600 + ) +PREHOOK: type: QUERY +PREHOOK: Input: default@part +#### A masked pattern was here #### +POSTHOOK: query: -- add null check test from sq_notin.q once HIVE-7721 resolved. + +-- non agg, corr, having +select b.p_mfgr, min(p_retailprice) +from part b +group by b.p_mfgr +having b.p_mfgr not in + (select p_mfgr + from (select p_mfgr, min(p_retailprice) l, max(p_retailprice) r, avg(p_retailprice) a from part group by p_mfgr) a + where min(p_retailprice) = l and r - l > 600 + ) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@part +#### A masked pattern was here #### +Manufacturer#1 1173.15 +Manufacturer#2 1690.68 +PREHOOK: query: -- agg, non corr, having +select b.p_mfgr, min(p_retailprice) +from part b +group by b.p_mfgr +having b.p_mfgr not in + (select p_mfgr + from part a + group by p_mfgr + having max(p_retailprice) - min(p_retailprice) > 600 + ) +PREHOOK: type: QUERY +PREHOOK: Input: default@part +#### A masked pattern was here #### +POSTHOOK: query: -- agg, non corr, having +select b.p_mfgr, min(p_retailprice) +from part b +group by b.p_mfgr +having b.p_mfgr not in + (select p_mfgr + from part a + group by p_mfgr + having max(p_retailprice) - min(p_retailprice) > 600 + ) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@part +#### A masked pattern was here #### +Manufacturer#1 1173.15 +Manufacturer#2 1690.68 +PREHOOK: query: -- 17. SubQueries In +-- non agg, non corr +select * +from src_cbo +where src_cbo.key in (select key from src_cbo s1 where s1.key > '9') +PREHOOK: type: QUERY +PREHOOK: Input: default@src_cbo +#### A masked pattern was here #### +POSTHOOK: query: -- 17. 
SubQueries In +-- non agg, non corr +select * +from src_cbo +where src_cbo.key in (select key from src_cbo s1 where s1.key > '9') +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src_cbo +#### A masked pattern was here #### +90 val_90 +90 val_90 +90 val_90 +92 val_92 +95 val_95 +95 val_95 +96 val_96 +97 val_97 +97 val_97 +98 val_98 +98 val_98 +PREHOOK: query: -- agg, corr +-- add back once rank issue fixed for cbo + +-- distinct, corr +select * +from src_cbo b +where b.key in + (select distinct a.key + from src_cbo a + where b.value = a.value and a.key > '9' + ) +PREHOOK: type: QUERY +PREHOOK: Input: default@src_cbo +#### A masked pattern was here #### +POSTHOOK: query: -- agg, corr +-- add back once rank issue fixed for cbo + +-- distinct, corr +select * +from src_cbo b +where b.key in + (select distinct a.key + from src_cbo a + where b.value = a.value and a.key > '9' + ) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src_cbo +#### A masked pattern was here #### +90 val_90 +90 val_90 +90 val_90 +92 val_92 +95 val_95 +95 val_95 +96 val_96 +97 val_97 +97 val_97 +98 val_98 +98 val_98 +PREHOOK: query: -- non agg, corr, with join in Parent Query +select p.p_partkey, li.l_suppkey +from (select distinct l_partkey as p_partkey from lineitem) p join lineitem li on p.p_partkey = li.l_partkey +where li.l_linenumber = 1 and + li.l_orderkey in (select l_orderkey from lineitem where l_shipmode = 'AIR' and l_linenumber = li.l_linenumber) +PREHOOK: type: QUERY +PREHOOK: Input: default@lineitem +#### A masked pattern was here #### +POSTHOOK: query: -- non agg, corr, with join in Parent Query +select p.p_partkey, li.l_suppkey +from (select distinct l_partkey as p_partkey from lineitem) p join lineitem li on p.p_partkey = li.l_partkey +where li.l_linenumber = 1 and + li.l_orderkey in (select l_orderkey from lineitem where l_shipmode = 'AIR' and l_linenumber = li.l_linenumber) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@lineitem +#### A masked pattern was here #### +4297 1798 +108570 8571 +PREHOOK: query: -- where and having +-- Plan is: +-- Stage 1: b semijoin sq1:src_cbo (subquery in where) +-- Stage 2: group by Stage 1 o/p +-- Stage 5: group by on sq2:src_cbo (subquery in having) +-- Stage 6: Stage 2 o/p semijoin Stage 5 +select key, value, count(*) +from src_cbo b +where b.key in (select key from src_cbo where src_cbo.key > '8') +group by key, value +having count(*) in (select count(*) from src_cbo s1 where s1.key > '9' group by s1.key ) +PREHOOK: type: QUERY +PREHOOK: Input: default@src_cbo +#### A masked pattern was here #### +POSTHOOK: query: -- where and having +-- Plan is: +-- Stage 1: b semijoin sq1:src_cbo (subquery in where) +-- Stage 2: group by Stage 1 o/p +-- Stage 5: group by on sq2:src_cbo (subquery in having) +-- Stage 6: Stage 2 o/p semijoin Stage 5 +select key, value, count(*) +from src_cbo b +where b.key in (select key from src_cbo where src_cbo.key > '8') +group by key, value +having count(*) in (select count(*) from src_cbo s1 where s1.key > '9' group by s1.key ) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src_cbo +#### A masked pattern was here #### +80 val_80 1 +96 val_96 1 +92 val_92 1 +9 val_9 1 +87 val_87 1 +86 val_86 1 +85 val_85 1 +82 val_82 1 +84 val_84 2 +95 val_95 2 +83 val_83 2 +98 val_98 2 +97 val_97 2 +90 val_90 3 +PREHOOK: query: -- non agg, non corr, windowing +select p_mfgr, p_name, avg(p_size) +from part +group by p_mfgr, p_name +having p_name in + (select first_value(p_name) over(partition by p_mfgr order by p_size) from part) +PREHOOK: type: QUERY 
+PREHOOK: Input: default@part +#### A masked pattern was here #### +POSTHOOK: query: -- non agg, non corr, windowing +select p_mfgr, p_name, avg(p_size) +from part +group by p_mfgr, p_name +having p_name in + (select first_value(p_name) over(partition by p_mfgr order by p_size) from part) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@part +#### A masked pattern was here #### +Manufacturer#1 almond antique burnished rose metallic 2.0 +Manufacturer#3 almond antique misty red olive 1.0 +Manufacturer#5 almond antique sky peru orange 2.0 +Manufacturer#2 almond aquamarine midnight light salmon 2.0 +Manufacturer#4 almond aquamarine yellow dodger mint 7.0 +PREHOOK: query: -- 18. SubQueries Not Exists +-- distinct, corr +select * +from src_cbo b +where not exists + (select distinct a.key + from src_cbo a + where b.value = a.value and a.value > 'val_2' + ) +PREHOOK: type: QUERY +PREHOOK: Input: default@src_cbo +#### A masked pattern was here #### +POSTHOOK: query: -- 18. SubQueries Not Exists +-- distinct, corr +select * +from src_cbo b +where not exists + (select distinct a.key + from src_cbo a + where b.value = a.value and a.value > 'val_2' + ) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src_cbo +#### A masked pattern was here #### +0 val_0 +0 val_0 +0 val_0 +10 val_10 +100 val_100 +100 val_100 +103 val_103 +103 val_103 +104 val_104 +104 val_104 +105 val_105 +11 val_11 +111 val_111 +113 val_113 +113 val_113 +114 val_114 +116 val_116 +118 val_118 +118 val_118 +119 val_119 +119 val_119 +119 val_119 +12 val_12 +12 val_12 +120 val_120 +120 val_120 +125 val_125 +125 val_125 +126 val_126 +128 val_128 +128 val_128 +128 val_128 +129 val_129 +129 val_129 +131 val_131 +133 val_133 +134 val_134 +134 val_134 +136 val_136 +137 val_137 +137 val_137 +138 val_138 +138 val_138 +138 val_138 +138 val_138 +143 val_143 +145 val_145 +146 val_146 +146 val_146 +149 val_149 +149 val_149 +15 val_15 +15 val_15 +150 val_150 +152 val_152 +152 val_152 +153 val_153 +155 val_155 +156 val_156 +157 val_157 +158 val_158 +160 val_160 +162 val_162 +163 val_163 +164 val_164 +164 val_164 +165 val_165 +165 val_165 +166 val_166 +167 val_167 +167 val_167 +167 val_167 +168 val_168 +169 val_169 +169 val_169 +169 val_169 +169 val_169 +17 val_17 +170 val_170 +172 val_172 +172 val_172 +174 val_174 +174 val_174 +175 val_175 +175 val_175 +176 val_176 +176 val_176 +177 val_177 +178 val_178 +179 val_179 +179 val_179 +18 val_18 +18 val_18 +180 val_180 +181 val_181 +183 val_183 +186 val_186 +187 val_187 +187 val_187 +187 val_187 +189 val_189 +19 val_19 +190 val_190 +191 val_191 +191 val_191 +192 val_192 +193 val_193 +193 val_193 +193 val_193 +194 val_194 +195 val_195 +195 val_195 +196 val_196 +197 val_197 +197 val_197 +199 val_199 +199 val_199 +199 val_199 +2 val_2 +PREHOOK: query: -- no agg, corr, having +select * +from src_cbo b +group by key, value +having not exists + (select a.key + from src_cbo a + where b.value = a.value and a.key = b.key and a.value > 'val_12' + ) +PREHOOK: type: QUERY +PREHOOK: Input: default@src_cbo +#### A masked pattern was here #### +POSTHOOK: query: -- no agg, corr, having +select * +from src_cbo b +group by key, value +having not exists + (select a.key + from src_cbo a + where b.value = a.value and a.key = b.key and a.value > 'val_12' + ) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src_cbo +#### A masked pattern was here #### +0 val_0 +10 val_10 +100 val_100 +103 val_103 +104 val_104 +105 val_105 +11 val_11 +111 val_111 +113 val_113 +114 val_114 +116 val_116 +118 val_118 +119 val_119 +12 val_12 +PREHOOK: 
query: -- 19. SubQueries Exists +-- view test +create view cv1 as +select * +from src_cbo b +where exists + (select a.key + from src_cbo a + where b.value = a.value and a.key = b.key and a.value > 'val_9') +PREHOOK: type: CREATEVIEW +PREHOOK: Input: default@src_cbo +PREHOOK: Output: database:default +PREHOOK: Output: default@cv1 +POSTHOOK: query: -- 19. SubQueries Exists +-- view test +create view cv1 as +select * +from src_cbo b +where exists + (select a.key + from src_cbo a + where b.value = a.value and a.key = b.key and a.value > 'val_9') +POSTHOOK: type: CREATEVIEW +POSTHOOK: Input: default@src_cbo +POSTHOOK: Output: database:default +POSTHOOK: Output: default@cv1 +PREHOOK: query: select * from cv1 +PREHOOK: type: QUERY +PREHOOK: Input: default@cv1 +PREHOOK: Input: default@src_cbo +#### A masked pattern was here #### +POSTHOOK: query: select * from cv1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cv1 +POSTHOOK: Input: default@src_cbo +#### A masked pattern was here #### +90 val_90 +90 val_90 +90 val_90 +92 val_92 +95 val_95 +95 val_95 +96 val_96 +97 val_97 +97 val_97 +98 val_98 +98 val_98 +PREHOOK: query: -- sq in from +select * +from (select * + from src_cbo b + where exists + (select a.key + from src_cbo a + where b.value = a.value and a.key = b.key and a.value > 'val_9') + ) a +PREHOOK: type: QUERY +PREHOOK: Input: default@src_cbo +#### A masked pattern was here #### +POSTHOOK: query: -- sq in from +select * +from (select * + from src_cbo b + where exists + (select a.key + from src_cbo a + where b.value = a.value and a.key = b.key and a.value > 'val_9') + ) a +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src_cbo +#### A masked pattern was here #### +90 val_90 +90 val_90 +90 val_90 +92 val_92 +95 val_95 +95 val_95 +96 val_96 +97 val_97 +97 val_97 +98 val_98 +98 val_98 +PREHOOK: query: -- sq in from, having +select * +from (select b.key, count(*) + from src_cbo b + group by b.key + having exists + (select a.key + from src_cbo a + where a.key = b.key and a.value > 'val_9' + ) +) a +PREHOOK: type: QUERY +PREHOOK: Input: default@src_cbo +#### A masked pattern was here #### +POSTHOOK: query: -- sq in from, having +select * +from (select b.key, count(*) + from src_cbo b + group by b.key + having exists + (select a.key + from src_cbo a + where a.key = b.key and a.value > 'val_9' + ) +) a +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src_cbo +#### A masked pattern was here #### +90 3 +92 1 +95 2 +96 1 +97 2 +98 2 +PREHOOK: query: -- 17. get stats with empty partition list +select t1.value from t1 join t2 on t1.key = t2.key where t1.dt = '10' and t1.c_boolean = true +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t2@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: -- 17. 
get stats with empty partition list +select t1.value from t1 join t2 on t1.key = t2.key where t1.dt = '10' and t1.c_boolean = true +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t2@dt=2014 +#### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/tez/auto_join0.q.out b/ql/src/test/results/clientpositive/tez/auto_join0.q.out index 8b5ea51..2ae9e5a 100644 --- a/ql/src/test/results/clientpositive/tez/auto_join0.q.out +++ b/ql/src/test/results/clientpositive/tez/auto_join0.q.out @@ -56,6 +56,8 @@ STAGE PLANS: 0 1 outputColumnNames: _col0, _col1, _col2, _col3 + input vertices: + 1 Map 4 Statistics: Num rows: 9 Data size: 1983 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: string) diff --git a/ql/src/test/results/clientpositive/tez/auto_join1.q.out b/ql/src/test/results/clientpositive/tez/auto_join1.q.out index cd07d67..d15c90b 100644 --- a/ql/src/test/results/clientpositive/tez/auto_join1.q.out +++ b/ql/src/test/results/clientpositive/tez/auto_join1.q.out @@ -45,6 +45,8 @@ STAGE PLANS: 0 key (type: string) 1 key (type: string) outputColumnNames: _col0, _col6 + input vertices: + 0 Map 2 Statistics: Num rows: 31 Data size: 3196 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: UDFToInteger(_col0) (type: int), _col6 (type: string) diff --git a/ql/src/test/results/clientpositive/tez/bucket_map_join_tez1.q.out b/ql/src/test/results/clientpositive/tez/bucket_map_join_tez1.q.out index b3f16c1..512349d 100644 --- a/ql/src/test/results/clientpositive/tez/bucket_map_join_tez1.q.out +++ b/ql/src/test/results/clientpositive/tez/bucket_map_join_tez1.q.out @@ -145,6 +145,8 @@ STAGE PLANS: 0 key (type: int) 1 key (type: int) outputColumnNames: _col0, _col1, _col7 + input vertices: + 0 Map 2 Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: int), _col1 (type: string), _col7 (type: string) @@ -222,6 +224,8 @@ STAGE PLANS: 0 _col1 (type: int) 1 key (type: int) outputColumnNames: _col0, _col1, _col3 + input vertices: + 0 Reducer 3 Statistics: Num rows: 133 Data size: 1411 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col1 (type: int), _col0 (type: double), _col3 (type: string) @@ -337,6 +341,8 @@ STAGE PLANS: 0 key (type: int) 1 key (type: int) outputColumnNames: _col6, _col7 + input vertices: + 1 Map 4 Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col6 (type: int), _col7 (type: string) @@ -390,6 +396,8 @@ STAGE PLANS: 0 _col1 (type: int) 1 key (type: int) outputColumnNames: _col0, _col1, _col3 + input vertices: + 1 Map 1 Statistics: Num rows: 150 Data size: 1600 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col1 (type: int), _col0 (type: double), _col3 (type: string) @@ -450,6 +458,8 @@ STAGE PLANS: 0 _col1 (type: int) 1 key (type: int) outputColumnNames: _col0, _col1, _col3 + input vertices: + 0 Reducer 3 Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col1 (type: int), _col0 (type: double), _col3 (type: string) @@ -480,6 +490,8 @@ STAGE PLANS: 0 key (type: int) 1 key (type: int) outputColumnNames: _col0, _col1 + input vertices: + 1 Map 4 Statistics: Num rows: 133 Data size: 1411 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 
(type: int), _col1 (type: string) @@ -604,6 +616,9 @@ STAGE PLANS: 1 key (type: int) 2 key (type: int) outputColumnNames: _col0, _col1, _col7 + input vertices: + 1 Map 1 + 2 Map 2 Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: int), _col1 (type: string), _col7 (type: string) @@ -674,6 +689,8 @@ STAGE PLANS: 0 key (type: int) 1 key (type: int) outputColumnNames: _col0, _col1 + input vertices: + 1 Map 3 Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: int), _col1 (type: string) @@ -689,6 +706,8 @@ STAGE PLANS: 0 _col0 (type: int) 1 key (type: int) outputColumnNames: _col0, _col1, _col3 + input vertices: + 1 Map 1 Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: int), _col1 (type: string), _col3 (type: string) @@ -763,6 +782,8 @@ STAGE PLANS: 0 _col0 (type: int) 1 key (type: int) outputColumnNames: _col0, _col1, _col3 + input vertices: + 0 Reducer 3 Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: int), _col1 (type: double), _col3 (type: string) @@ -864,6 +885,8 @@ STAGE PLANS: 0 _col0 (type: int) 1 key (type: int) outputColumnNames: _col0, _col1, _col3 + input vertices: + 0 Reducer 3 Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: int), _col1 (type: double), _col3 (type: string) @@ -958,6 +981,8 @@ STAGE PLANS: 0 value (type: string) 1 value (type: string) outputColumnNames: _col0, _col1, _col7 + input vertices: + 0 Map 2 Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: int), _col1 (type: string), _col7 (type: string) @@ -1050,6 +1075,8 @@ STAGE PLANS: 0 key (type: int) 1 key (type: int) outputColumnNames: _col0, _col1, _col6 + input vertices: + 0 Map 2 Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: int), _col1 (type: string), _col6 (type: string) @@ -1130,6 +1157,8 @@ STAGE PLANS: 0 key (type: int) 1 key (type: int) outputColumnNames: _col0, _col1 + input vertices: + 0 Map 3 Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE Map Join Operator condition map: @@ -1141,6 +1170,8 @@ STAGE PLANS: 0 _col1 (type: string) 1 value (type: string) outputColumnNames: _col0, _col12 + input vertices: + 1 Map 1 Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: int), _col12 (type: int) diff --git a/ql/src/test/results/clientpositive/tez/bucket_map_join_tez2.q.out b/ql/src/test/results/clientpositive/tez/bucket_map_join_tez2.q.out index ad09d3e..19b8307 100644 --- a/ql/src/test/results/clientpositive/tez/bucket_map_join_tez2.q.out +++ b/ql/src/test/results/clientpositive/tez/bucket_map_join_tez2.q.out @@ -155,6 +155,8 @@ STAGE PLANS: 0 key (type: int) 1 key (type: int) outputColumnNames: _col0, _col1 + input vertices: + 0 Map 3 Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE Map Join Operator condition map: @@ -166,6 +168,8 @@ STAGE PLANS: 0 _col1 (type: string) 1 value (type: string) outputColumnNames: _col0, _col12 + input vertices: + 1 Map 1 Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE Select Operator 
expressions: _col0 (type: int), _col12 (type: int) @@ -258,6 +262,8 @@ STAGE PLANS: 0 UDFToDouble(key) (type: double) 1 UDFToDouble(key) (type: double) outputColumnNames: _col0, _col1, _col6 + input vertices: + 0 Map 2 Statistics: Num rows: 133 Data size: 1411 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: int), _col1 (type: string), _col6 (type: string) @@ -330,6 +336,8 @@ STAGE PLANS: 0 _col0 (type: int) 1 _col0 (type: int) outputColumnNames: _col0, _col1 + input vertices: + 1 Map 2 Statistics: Num rows: 91 Data size: 969 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: int), _col1 (type: int) @@ -405,6 +413,8 @@ STAGE PLANS: 0 _col0 (type: int) 1 _col0 (type: int) outputColumnNames: _col0, _col1 + input vertices: + 1 Map 2 Statistics: Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: int), _col1 (type: int) @@ -497,6 +507,8 @@ STAGE PLANS: 0 _col0 (type: int) 1 _col0 (type: int) outputColumnNames: _col0, _col1 + input vertices: + 0 Map 1 Statistics: Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: int), _col1 (type: int) @@ -550,6 +562,8 @@ STAGE PLANS: 0 _col0 (type: int) 1 key (type: int) outputColumnNames: _col0, _col1 + input vertices: + 0 Reducer 3 Statistics: Num rows: 133 Data size: 1411 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: int), _col1 (type: int) @@ -641,6 +655,8 @@ STAGE PLANS: 0 UDFToDouble(_col0) (type: double) 1 UDFToDouble(key) (type: double) outputColumnNames: _col0, _col2 + input vertices: + 0 Reducer 3 Statistics: Num rows: 133 Data size: 1411 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: string), _col2 (type: string) diff --git a/ql/src/test/results/clientpositive/tez/cbo_correctness.q.out b/ql/src/test/results/clientpositive/tez/cbo_correctness.q.out new file mode 100644 index 0000000..93e3f59 --- /dev/null +++ b/ql/src/test/results/clientpositive/tez/cbo_correctness.q.out @@ -0,0 +1,15948 @@ +PREHOOK: query: drop table if exists t1 +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table if exists t1 +POSTHOOK: type: DROPTABLE +PREHOOK: query: drop table if exists t2 +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table if exists t2 +POSTHOOK: type: DROPTABLE +PREHOOK: query: drop table if exists t3 +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table if exists t3 +POSTHOOK: type: DROPTABLE +PREHOOK: query: drop table if exists t4 +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table if exists t4 +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table t1(key string, value string, c_int int, c_float float, c_boolean boolean) row format delimited fields terminated by ',' STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +POSTHOOK: query: create table t1(key string, value string, c_int int, c_float float, c_boolean boolean) row format delimited fields terminated by ',' STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@t1 +PREHOOK: query: create table t2(key string, value string, c_int int, c_float float, c_boolean boolean) row format delimited fields terminated by ',' STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +POSTHOOK: query: create table t2(key string, value string, c_int int, c_float float, c_boolean boolean) row format delimited fields terminated by ',' 
STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@t2 +PREHOOK: query: create table t3(key string, value string, c_int int, c_float float, c_boolean boolean) row format delimited fields terminated by ',' STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +POSTHOOK: query: create table t3(key string, value string, c_int int, c_float float, c_boolean boolean) row format delimited fields terminated by ',' STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@t3 +PREHOOK: query: create table t4(key string, value string, c_int int, c_float float, c_boolean boolean) row format delimited fields terminated by ',' STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +POSTHOOK: query: create table t4(key string, value string, c_int int, c_float float, c_boolean boolean) row format delimited fields terminated by ',' STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@t4 +PREHOOK: query: load data local inpath '../../data/files/cbo_t1.txt' into table t1 +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@t1 +POSTHOOK: query: load data local inpath '../../data/files/cbo_t1.txt' into table t1 +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@t1 +PREHOOK: query: load data local inpath '../../data/files/cbo_t2.txt' into table t2 +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@t2 +POSTHOOK: query: load data local inpath '../../data/files/cbo_t2.txt' into table t2 +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@t2 +PREHOOK: query: load data local inpath '../../data/files/cbo_t3.txt' into table t3 +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@t3 +POSTHOOK: query: load data local inpath '../../data/files/cbo_t3.txt' into table t3 +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@t3 +PREHOOK: query: load data local inpath '../../data/files/cbo_t4.txt' into table t4 +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@t4 +POSTHOOK: query: load data local inpath '../../data/files/cbo_t4.txt' into table t4 +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@t4 +PREHOOK: query: analyze table t1 compute statistics +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Output: default@t1 +POSTHOOK: query: analyze table t1 compute statistics +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Output: default@t1 +PREHOOK: query: analyze table t1 compute statistics for columns key, value, c_int, c_float, c_boolean +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +#### A masked pattern was here #### +POSTHOOK: query: analyze table t1 compute statistics for columns key, value, c_int, c_float, c_boolean +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +#### A masked pattern was here #### +PREHOOK: query: analyze table t2 compute statistics +PREHOOK: type: QUERY +PREHOOK: Input: default@t2 +PREHOOK: Output: default@t2 +POSTHOOK: query: analyze table t2 compute statistics +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t2 +POSTHOOK: Output: default@t2 +PREHOOK: query: analyze table t2 compute statistics for columns key, value, c_int, c_float, c_boolean +PREHOOK: type: QUERY +PREHOOK: Input: 
default@t2 +#### A masked pattern was here #### +POSTHOOK: query: analyze table t2 compute statistics for columns key, value, c_int, c_float, c_boolean +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t2 +#### A masked pattern was here #### +PREHOOK: query: analyze table t3 compute statistics +PREHOOK: type: QUERY +PREHOOK: Input: default@t3 +PREHOOK: Output: default@t3 +POSTHOOK: query: analyze table t3 compute statistics +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t3 +POSTHOOK: Output: default@t3 +PREHOOK: query: analyze table t3 compute statistics for columns key, value, c_int, c_float, c_boolean +PREHOOK: type: QUERY +PREHOOK: Input: default@t3 +#### A masked pattern was here #### +POSTHOOK: query: analyze table t3 compute statistics for columns key, value, c_int, c_float, c_boolean +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t3 +#### A masked pattern was here #### +PREHOOK: query: analyze table t4 compute statistics +PREHOOK: type: QUERY +PREHOOK: Input: default@t4 +PREHOOK: Output: default@t4 +POSTHOOK: query: analyze table t4 compute statistics +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t4 +POSTHOOK: Output: default@t4 +PREHOOK: query: analyze table t4 compute statistics for columns key, value, c_int, c_float, c_boolean +PREHOOK: type: QUERY +PREHOOK: Input: default@t4 +#### A masked pattern was here #### +POSTHOOK: query: analyze table t4 compute statistics for columns key, value, c_int, c_float, c_boolean +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t4 +#### A masked pattern was here #### +PREHOOK: query: -- 1. Test Select + TS +select * from t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +#### A masked pattern was here #### +POSTHOOK: query: -- 1. Test Select + TS +select * from t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +#### A masked pattern was here #### +1 1 1 1.0 true +1 1 1 1.0 true +1 1 1 1.0 true +1 1 1 1.0 true +1 1 1 1.0 true +1 1 1 1.0 true +1 1 1 1.0 true +1 1 1 1.0 true +1 1 1 1.0 true +1 1 1 1.0 true + 1 1 1 1.0 true + 1 1 1 1.0 true + 1 1 1 1.0 true + 1 1 1 1.0 true +1 1 1 1.0 true +1 1 1 1.0 true +1 1 1 1.0 false +1 1 1 1.0 false +null null NULL NULL NULL +null null NULL NULL NULL +PREHOOK: query: select * from t1 as t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +#### A masked pattern was here #### +POSTHOOK: query: select * from t1 as t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +#### A masked pattern was here #### +1 1 1 1.0 true +1 1 1 1.0 true +1 1 1 1.0 true +1 1 1 1.0 true +1 1 1 1.0 true +1 1 1 1.0 true +1 1 1 1.0 true +1 1 1 1.0 true +1 1 1 1.0 true +1 1 1 1.0 true + 1 1 1 1.0 true + 1 1 1 1.0 true + 1 1 1 1.0 true + 1 1 1 1.0 true +1 1 1 1.0 true +1 1 1 1.0 true +1 1 1 1.0 false +1 1 1 1.0 false +null null NULL NULL NULL +null null NULL NULL NULL +PREHOOK: query: select * from t1 as t2 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +#### A masked pattern was here #### +POSTHOOK: query: select * from t1 as t2 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +#### A masked pattern was here #### +1 1 1 1.0 true +1 1 1 1.0 true +1 1 1 1.0 true +1 1 1 1.0 true +1 1 1 1.0 true +1 1 1 1.0 true +1 1 1 1.0 true +1 1 1 1.0 true +1 1 1 1.0 true +1 1 1 1.0 true + 1 1 1 1.0 true + 1 1 1 1.0 true + 1 1 1 1.0 true + 1 1 1 1.0 true +1 1 1 1.0 true +1 1 1 1.0 true +1 1 1 1.0 false +1 1 1 1.0 false +null null NULL NULL NULL +null null NULL NULL NULL +PREHOOK: query: select t1.key as x, c_int as c_int, (((c_int+c_float)*10)+5) as y from t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +#### A masked pattern 
was here #### +POSTHOOK: query: select t1.key as x, c_int as c_int, (((c_int+c_float)*10)+5) as y from t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +#### A masked pattern was here #### +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 + 1 1 25.0 + 1 1 25.0 + 1 1 25.0 + 1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +null NULL NULL +null NULL NULL +PREHOOK: query: -- 2. Test Select + TS + FIL +select * from t1 where t1.c_int >= 0 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +#### A masked pattern was here #### +POSTHOOK: query: -- 2. Test Select + TS + FIL +select * from t1 where t1.c_int >= 0 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +#### A masked pattern was here #### +1 1 1 1.0 true +1 1 1 1.0 true +1 1 1 1.0 true +1 1 1 1.0 true +1 1 1 1.0 true +1 1 1 1.0 true +1 1 1 1.0 true +1 1 1 1.0 true +1 1 1 1.0 true +1 1 1 1.0 true + 1 1 1 1.0 true + 1 1 1 1.0 true + 1 1 1 1.0 true + 1 1 1 1.0 true +1 1 1 1.0 true +1 1 1 1.0 true +1 1 1 1.0 false +1 1 1 1.0 false +PREHOOK: query: select * from t1 as t1 where t1.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +#### A masked pattern was here #### +POSTHOOK: query: select * from t1 as t1 where t1.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +#### A masked pattern was here #### +1 1 1 1.0 true +1 1 1 1.0 true +1 1 1 1.0 true +1 1 1 1.0 true +1 1 1 1.0 true +1 1 1 1.0 true +1 1 1 1.0 true +1 1 1 1.0 true +1 1 1 1.0 true +1 1 1 1.0 true + 1 1 1 1.0 true + 1 1 1 1.0 true + 1 1 1 1.0 true + 1 1 1 1.0 true +1 1 1 1.0 true +1 1 1 1.0 true +1 1 1 1.0 false +1 1 1 1.0 false +PREHOOK: query: select * from t1 as t2 where t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +#### A masked pattern was here #### +POSTHOOK: query: select * from t1 as t2 where t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +#### A masked pattern was here #### +1 1 1 1.0 true +1 1 1 1.0 true +1 1 1 1.0 true +1 1 1 1.0 true +1 1 1 1.0 true +1 1 1 1.0 true +1 1 1 1.0 true +1 1 1 1.0 true +1 1 1 1.0 true +1 1 1 1.0 true + 1 1 1 1.0 true + 1 1 1 1.0 true + 1 1 1 1.0 true + 1 1 1 1.0 true +1 1 1 1.0 true +1 1 1 1.0 true +1 1 1 1.0 false +1 1 1 1.0 false +PREHOOK: query: select t2.key as x, c_int as c_int, (((c_int+c_float)*10)+5) as y from t1 as t2 where t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +#### A masked pattern was here #### +POSTHOOK: query: select t2.key as x, c_int as c_int, (((c_int+c_float)*10)+5) as y from t1 as t2 where t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +#### A masked pattern was here #### +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 + 1 1 25.0 + 1 1 25.0 + 1 1 25.0 + 1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +PREHOOK: query: -- 3 Test Select + Select + TS + FIL +select * from (select * from t1 where t1.c_int >= 0) as t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +#### A masked pattern was here #### +POSTHOOK: query: -- 3 Test Select + Select + TS + FIL +select * from (select * from t1 where t1.c_int >= 0) as t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +#### A masked pattern was here #### +1 1 1 1.0 true +1 1 1 1.0 true +1 1 1 1.0 true +1 1 1 1.0 true +1 1 1 1.0 
true +1 1 1 1.0 true +1 1 1 1.0 true +1 1 1 1.0 true +1 1 1 1.0 true +1 1 1 1.0 true + 1 1 1 1.0 true + 1 1 1 1.0 true + 1 1 1 1.0 true + 1 1 1 1.0 true +1 1 1 1.0 true +1 1 1 1.0 true +1 1 1 1.0 false +1 1 1 1.0 false +PREHOOK: query: select * from (select * from t1 as t1 where t1.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select * from t1 as t1 where t1.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +#### A masked pattern was here #### +1 1 1 1.0 true +1 1 1 1.0 true +1 1 1 1.0 true +1 1 1 1.0 true +1 1 1 1.0 true +1 1 1 1.0 true +1 1 1 1.0 true +1 1 1 1.0 true +1 1 1 1.0 true +1 1 1 1.0 true + 1 1 1 1.0 true + 1 1 1 1.0 true + 1 1 1 1.0 true + 1 1 1 1.0 true +1 1 1 1.0 true +1 1 1 1.0 true +1 1 1 1.0 false +1 1 1 1.0 false +PREHOOK: query: select * from (select * from t1 as t2 where t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select * from t1 as t2 where t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +#### A masked pattern was here #### +1 1 1 1.0 true +1 1 1 1.0 true +1 1 1 1.0 true +1 1 1 1.0 true +1 1 1 1.0 true +1 1 1 1.0 true +1 1 1 1.0 true +1 1 1 1.0 true +1 1 1 1.0 true +1 1 1 1.0 true + 1 1 1 1.0 true + 1 1 1 1.0 true + 1 1 1 1.0 true + 1 1 1 1.0 true +1 1 1 1.0 true +1 1 1 1.0 true +1 1 1 1.0 false +1 1 1 1.0 false +PREHOOK: query: select * from (select t2.key as x, c_int as c_int, (((c_int+c_float)*10)+5) as y from t1 as t2 where t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select t2.key as x, c_int as c_int, (((c_int+c_float)*10)+5) as y from t1 as t2 where t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +#### A masked pattern was here #### +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 + 1 1 25.0 + 1 1 25.0 + 1 1 25.0 + 1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +PREHOOK: query: select * from (select * from t1 where t1.c_int >= 0) as t1 where t1.c_int >= 0 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select * from t1 where t1.c_int >= 0) as t1 where t1.c_int >= 0 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +#### A masked pattern was here #### +1 1 1 1.0 true +1 1 1 1.0 true +1 1 1 1.0 true +1 1 1 1.0 true +1 1 1 1.0 true +1 1 1 1.0 true +1 1 1 1.0 true +1 1 1 1.0 true +1 1 1 1.0 true +1 1 1 1.0 true + 1 1 1 1.0 true + 1 1 1 1.0 true + 1 1 1 1.0 true + 1 1 1 1.0 true +1 1 1 1.0 true +1 1 1 1.0 true +1 1 1 1.0 false +1 1 1 1.0 false +PREHOOK: query: select * from (select * from t1 as t1 where t1.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as t1 where t1.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select * from t1 as t1 where t1.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as t1 where t1.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +#### A 
masked pattern was here #### +1 1 1 1.0 true +1 1 1 1.0 true +1 1 1 1.0 true +1 1 1 1.0 true +1 1 1 1.0 true +1 1 1 1.0 true +1 1 1 1.0 true +1 1 1 1.0 true +1 1 1 1.0 true +1 1 1 1.0 true + 1 1 1 1.0 true + 1 1 1 1.0 true + 1 1 1 1.0 true + 1 1 1 1.0 true +1 1 1 1.0 true +1 1 1 1.0 true +1 1 1 1.0 false +1 1 1 1.0 false +PREHOOK: query: select * from (select * from t1 as t2 where t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as t2 where t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select * from t1 as t2 where t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as t2 where t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +#### A masked pattern was here #### +1 1 1 1.0 true +1 1 1 1.0 true +1 1 1 1.0 true +1 1 1 1.0 true +1 1 1 1.0 true +1 1 1 1.0 true +1 1 1 1.0 true +1 1 1 1.0 true +1 1 1 1.0 true +1 1 1 1.0 true + 1 1 1 1.0 true + 1 1 1 1.0 true + 1 1 1 1.0 true + 1 1 1 1.0 true +1 1 1 1.0 true +1 1 1 1.0 true +1 1 1 1.0 false +1 1 1 1.0 false +PREHOOK: query: select * from (select t2.key as x, c_int as c_int, (((c_int+c_float)*10)+5) as y from t1 as t2 where t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as t1 where t1.c_int >= 0 and y+c_int >= 0 or x <= 100 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select t2.key as x, c_int as c_int, (((c_int+c_float)*10)+5) as y from t1 as t2 where t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as t1 where t1.c_int >= 0 and y+c_int >= 0 or x <= 100 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +#### A masked pattern was here #### +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 + 1 1 25.0 + 1 1 25.0 + 1 1 25.0 + 1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +PREHOOK: query: select t1.c_int+c_float as x , c_int as c_int, (((c_int+c_float)*10)+5) as y from (select * from t1 where t1.c_int >= 0) as t1 where t1.c_int >= 0 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +#### A masked pattern was here #### +POSTHOOK: query: select t1.c_int+c_float as x , c_int as c_int, (((c_int+c_float)*10)+5) as y from (select * from t1 where t1.c_int >= 0) as t1 where t1.c_int >= 0 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +#### A masked pattern was here #### +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +PREHOOK: query: select t2.c_int+c_float as x , c_int as c_int, (((c_int+c_float)*10)+5) as y from (select * from t1 where t1.c_int >= 0) as t2 where t2.c_int >= 0 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +#### A masked pattern was here #### +POSTHOOK: query: select t2.c_int+c_float as x , c_int as c_int, (((c_int+c_float)*10)+5) as y from (select * from t1 where t1.c_int >= 0) as t2 where t2.c_int >= 0 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +#### A masked pattern was here #### +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +PREHOOK: query: -- 4. 
Test Select + Join + TS +select t1.c_int, t2.c_int from t1 join t2 on t1.key=t2.key +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t2 +#### A masked pattern was here #### +POSTHOOK: query: -- 4. Test Select + Join + TS +select t1.c_int, t2.c_int from t1 join t2 on t1.key=t2.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t2 +#### A masked pattern was here #### +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +NULL NULL +NULL NULL +NULL NULL +NULL NULL +PREHOOK: query: select t1.c_int from t1 left semi join t2 on t1.key=t2.key +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t2 +#### A masked pattern was here #### +POSTHOOK: query: select t1.c_int from t1 left semi join t2 on t1.key=t2.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t2 +#### A masked pattern was here #### +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +NULL +NULL +PREHOOK: query: select t1.c_int, t2.c_int from t1 left outer join t2 on t1.key=t2.key +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t2 +#### A masked pattern was here #### +POSTHOOK: query: select t1.c_int, t2.c_int from t1 left outer join t2 on t1.key=t2.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t2 +#### A masked pattern was here #### +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +NULL NULL +NULL NULL +NULL NULL +NULL NULL +PREHOOK: query: select t1.c_int, t2.c_int from t1 right outer join t2 on t1.key=t2.key +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t2 +#### A masked pattern was here #### +POSTHOOK: query: select t1.c_int, t2.c_int from t1 right outer join t2 on t1.key=t2.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t2 +#### A masked pattern was here #### +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +NULL 2 +NULL 2 +NULL 2 +NULL 2 +NULL 2 +NULL NULL +NULL NULL +NULL NULL +NULL NULL +PREHOOK: query: select t1.c_int, t2.c_int from t1 full outer join t2 on t1.key=t2.key +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t2 +#### A masked pattern was here #### +POSTHOOK: query: select t1.c_int, t2.c_int 
from t1 full outer join t2 on t1.key=t2.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t2 +#### A masked pattern was here #### +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +NULL 2 +NULL 2 +NULL 2 +NULL 2 +NULL 2 +NULL NULL +NULL NULL +NULL NULL +NULL NULL +PREHOOK: query: select b, t1.c, t2.p, q, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1) t1 join (select t2.key as p, t2.c_int as q, c_float as r from t2) t2 on t1.a=p join t3 on t1.a=key +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t3 +#### A masked pattern was here #### +POSTHOOK: query: select b, t1.c, t2.p, q, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1) t1 join (select t2.key as p, t2.c_int as q, c_float as r from t2) t2 on t1.a=p join t3 on t1.a=key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t3 +#### A masked pattern was here #### +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 
1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 
1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +NULL NULL null NULL NULL +NULL NULL null NULL NULL +NULL NULL null NULL NULL +NULL NULL null NULL NULL +NULL NULL null NULL NULL +NULL NULL null NULL NULL +NULL NULL null NULL NULL +NULL NULL null NULL NULL +PREHOOK: query: select key, t1.c_int, t2.p, q from t1 join (select t2.key as p, t2.c_int as q, c_float as r from t2) t2 on t1.key=p join (select key as a, c_int as b, t3.c_float as c from t3)t3 on t1.key=a +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t3 +#### A masked pattern was here #### +POSTHOOK: query: select key, t1.c_int, t2.p, q from t1 join (select t2.key as p, t2.c_int as q, c_float as r from t2) t2 on t1.key=p join (select key as a, c_int as b, t3.c_float as c from t3)t3 on t1.key=a +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t3 +#### A masked pattern was here #### + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 
+1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +null NULL null NULL +null NULL null NULL +null NULL null NULL +null NULL null NULL +null NULL null NULL +null NULL null NULL +null NULL null NULL +null NULL null NULL +PREHOOK: query: select a, t1.b, key, t2.c_int, t3.p from (select 
key as a, c_int as b, t1.c_float as c from t1) t1 join t2 on t1.a=key join (select key as p, c_int as q, t3.c_float as r from t3)t3 on t1.a=t3.p +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t3 +#### A masked pattern was here #### +POSTHOOK: query: select a, t1.b, key, t2.c_int, t3.p from (select key as a, c_int as b, t1.c_float as c from t1) t1 join t2 on t1.a=key join (select key as p, c_int as q, t3.c_float as r from t3)t3 on t1.a=t3.p +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t3 +#### A masked pattern was here #### + 1 1 1 1 1 + 1 1 1 1 1 + 1 1 1 1 1 + 1 1 1 1 1 + 1 1 1 1 1 + 1 1 1 1 1 + 1 1 1 1 1 + 1 1 1 1 1 + 1 1 1 1 1 + 1 1 1 1 1 + 1 1 1 1 1 + 1 1 1 1 1 + 1 1 1 1 1 + 1 1 1 1 1 + 1 1 1 1 1 + 1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 
1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +null NULL null NULL null +null NULL null NULL null +null NULL null NULL null +null NULL null NULL null +null NULL null NULL null +null NULL null NULL null +null NULL null NULL null +null NULL null NULL null +PREHOOK: query: select b, t1.c, t2.c_int, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1) t1 join t2 on t1.a=t2.key join t3 on t1.a=t3.key +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t3 +#### A masked pattern was here #### +POSTHOOK: query: select b, t1.c, t2.c_int, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1) t1 join t2 
on t1.a=t2.key join t3 on t1.a=t3.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t3 +#### A masked pattern was here #### +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 
1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +NULL NULL NULL NULL +NULL NULL NULL NULL +NULL NULL NULL NULL +NULL NULL NULL NULL +NULL NULL NULL NULL +NULL NULL NULL NULL +NULL NULL NULL NULL +NULL NULL NULL NULL +PREHOOK: query: select t3.c_int, b, t2.c_int, t1.c from (select key as a, c_int as b, t1.c_float as c from t1) t1 join t2 on t1.a=t2.key join t3 on t1.a=t3.key +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t3 +#### A masked pattern was here #### +POSTHOOK: query: select t3.c_int, b, t2.c_int, t1.c from (select key as a, c_int as b, t1.c_float as c from t1) t1 join t2 on t1.a=t2.key join t3 on t1.a=t3.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t3 +#### A masked pattern was here #### +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 
1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 
1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +NULL NULL NULL NULL +NULL NULL NULL NULL +NULL NULL NULL NULL +NULL NULL NULL NULL +NULL NULL NULL NULL +NULL NULL NULL NULL +NULL NULL NULL NULL +NULL NULL NULL NULL +PREHOOK: query: select b, t1.c, t2.p, q, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1) t1 left outer join (select t2.key as p, t2.c_int as q, c_float as r from t2) t2 on t1.a=p join t3 on t1.a=key +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t3 +#### A masked pattern was here #### +POSTHOOK: query: select b, t1.c, t2.p, q, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1) t1 left outer join (select t2.key as p, t2.c_int as q, c_float as r from t2) t2 on t1.a=p join t3 on t1.a=key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t3 +#### A masked pattern was here #### +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 
1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 
+1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +NULL NULL null NULL NULL +NULL NULL null NULL NULL +NULL NULL null NULL NULL +NULL NULL null NULL NULL +NULL NULL null NULL NULL +NULL NULL null NULL NULL +NULL NULL null NULL NULL +NULL NULL null NULL NULL +PREHOOK: query: select key, t1.c_int, t2.p, q from t1 join (select t2.key as p, t2.c_int as q, c_float as r from t2) t2 on t1.key=p left outer join (select key as a, c_int as b, t3.c_float as c from t3)t3 on t1.key=a +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t3 +#### A masked pattern was here #### +POSTHOOK: query: select key, t1.c_int, t2.p, q from t1 join (select t2.key as p, t2.c_int as q, c_float as r from t2) t2 on t1.key=p left outer join (select key as a, c_int as b, t3.c_float as c from t3)t3 on t1.key=a +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t3 +#### A masked pattern was here #### + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 
1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 
1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +null NULL null NULL +null NULL null NULL +null NULL null NULL +null NULL null NULL +null NULL null NULL +null NULL null NULL +null NULL null NULL +null NULL null NULL +PREHOOK: query: select b, t1.c, t2.p, q, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1) t1 right outer join (select t2.key as p, t2.c_int as q, c_float as r from t2) t2 on t1.a=p join t3 on t1.a=key +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t3 +#### A masked pattern was here #### +POSTHOOK: query: select b, t1.c, t2.p, q, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1) t1 right outer join (select t2.key as p, t2.c_int as q, c_float as r from t2) t2 on t1.a=p join t3 on t1.a=key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t3 +#### A masked pattern was here #### +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 
[... long run of identical result rows: 1 1.0 1 1 1 ...]
+NULL NULL null NULL NULL
+NULL NULL null NULL NULL
+NULL NULL null NULL NULL
+NULL NULL null NULL NULL
+NULL NULL null NULL NULL
+NULL NULL null NULL NULL
+NULL NULL null NULL NULL
+NULL NULL null NULL NULL
+PREHOOK: query: select key, t1.c_int, t2.p, q from t1 join (select t2.key as p, t2.c_int as q, c_float as r from t2) t2 on t1.key=p right outer join (select key as a, c_int as b, t3.c_float as c from t3)t3 on t1.key=a
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t2
+PREHOOK: Input: default@t3
+#### A masked pattern was here ####
+POSTHOOK: query: select key, t1.c_int, t2.p, q from t1 join (select t2.key as p, t2.c_int as q, c_float as r from t2) t2 on t1.key=p right outer join (select key as a, c_int as b, t3.c_float as c from t3)t3 on t1.key=a
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t3
+#### A masked pattern was here ####
+ 1 1 1 1
+ 1 1 1 1
+ 1 1 1 1
+ 1 1 1 1
+ 1 1 1 1
+ 1 1 1 1
+ 1 1 1 1
+ 1 1 1 1
+ 1 1 1 1
+ 1 1 1 1
+ 1 1 1 1
+ 1 1 1 1
+ 1 1 1 1
+ 1 1 1 1
+ 1 1 1 1
+ 1 1 1 1
[... long run of identical result rows: 1 1 1 1 ...]
+NULL NULL NULL NULL
+NULL NULL NULL NULL
+NULL NULL NULL NULL
+NULL NULL NULL NULL
+NULL NULL NULL NULL
+NULL NULL NULL NULL
+null NULL null NULL
+null NULL null NULL
+null NULL null NULL
+null NULL null NULL
+null NULL null NULL
+null NULL null NULL
+null NULL null NULL
+null NULL null NULL
+PREHOOK: query: select b, t1.c, t2.p, q, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1) t1 full outer join (select t2.key as p, t2.c_int as q, c_float as r from t2) t2 on t1.a=p join t3 on t1.a=key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t2
+PREHOOK: Input: default@t3
+#### A masked pattern was here ####
+POSTHOOK: query: select b, t1.c, t2.p, q, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1) t1 full outer join (select t2.key as p, t2.c_int as q, c_float as r from t2) t2 on t1.a=p join t3 on t1.a=key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t3
+#### A masked pattern was here ####
[... long run of identical result rows: 1 1.0 1 1 1 ...]
+NULL NULL null NULL NULL
+NULL NULL null NULL NULL
+NULL NULL null NULL NULL
+NULL NULL null NULL NULL
+NULL NULL null NULL NULL
+NULL NULL null NULL NULL
+NULL NULL null NULL NULL
+NULL NULL null NULL NULL
+PREHOOK: query: select key, t1.c_int, t2.p, q from t1 join (select t2.key as p, t2.c_int as q, c_float as r from t2) t2 on t1.key=p full outer join (select key as a, c_int as b, t3.c_float as c from t3)t3 on t1.key=a
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t2
+PREHOOK: Input: default@t3
+#### A masked pattern was here ####
+POSTHOOK: query: select key, t1.c_int, t2.p, q from t1 join (select t2.key as p, t2.c_int as q, c_float as r from t2) t2 on t1.key=p full outer join (select key as a, c_int as b, t3.c_float as c from t3)t3 on t1.key=a
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t3
+#### A masked pattern was here ####
+ 1 1 1 1
+ 1 1 1 1
+ 1 1 1 1
+ 1 1 1 1
+ 1 1 1 1
+ 1 1 1 1
+ 1 1 1 1
+ 1 1 1 1
+ 1 1 1 1
+ 1 1 1 1
+ 1 1 1 1
+ 1 1 1 1
+ 1 1 1 1
+ 1 1 1 1
+ 1 1 1 1
+ 1 1 1 1
[... long run of identical result rows: 1 1 1 1 ...]
+NULL NULL NULL NULL
+NULL NULL NULL NULL
+NULL NULL NULL NULL
+NULL NULL NULL NULL
+NULL NULL NULL NULL
+NULL NULL NULL NULL
+null NULL null NULL
+null NULL null NULL
+null NULL null NULL
+null NULL null NULL
+null NULL null NULL
+null NULL null NULL
+null NULL null NULL
+null NULL null NULL
+PREHOOK: query: -- 5. Test Select + Join + FIL + TS
+select t1.c_int, t2.c_int from t1 join t2 on t1.key=t2.key where (t1.c_int + t2.c_int == 2) and (t1.c_int > 0 or t2.c_float >= 0)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t2
+#### A masked pattern was here ####
+POSTHOOK: query: -- 5. Test Select + Join + FIL + TS
+select t1.c_int, t2.c_int from t1 join t2 on t1.key=t2.key where (t1.c_int + t2.c_int == 2) and (t1.c_int > 0 or t2.c_float >= 0)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t2
+#### A masked pattern was here ####
[... long run of identical result rows: 1 1 ...]
+PREHOOK: query: select t1.c_int from t1 left semi join t2 on t1.key=t2.key where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t2
+#### A masked pattern was here ####
+POSTHOOK: query: select t1.c_int from t1 left semi join t2 on t1.key=t2.key where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t2
+#### A masked pattern was here ####
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+PREHOOK: query: select t1.c_int, t2.c_int from t1 left outer join t2 on t1.key=t2.key where (t1.c_int + t2.c_int == 2) and (t1.c_int > 0 or t2.c_float >= 0)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t2
+#### A masked pattern was here ####
+POSTHOOK: query: select t1.c_int, t2.c_int from t1 left outer join t2 on t1.key=t2.key where (t1.c_int + t2.c_int == 2) and (t1.c_int > 0 or t2.c_float >= 0)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t2
+#### A masked pattern was here ####
[... long run of identical result rows: 1 1 ...]
+PREHOOK: query: select t1.c_int, t2.c_int from t1 right outer join t2 on t1.key=t2.key where (t1.c_int + t2.c_int == 2) and (t1.c_int > 0 or t2.c_float >= 0)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t2
+#### A masked pattern was here ####
+POSTHOOK: query: select t1.c_int, t2.c_int from t1 right outer join t2 on t1.key=t2.key where (t1.c_int + t2.c_int == 2) and (t1.c_int > 0 or t2.c_float >= 0)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t2
+#### A masked pattern was here ####
[... long run of identical result rows: 1 1 ...]
+PREHOOK: query: select t1.c_int, t2.c_int from t1 full outer join t2 on t1.key=t2.key where (t1.c_int + t2.c_int == 2) and (t1.c_int > 0 or t2.c_float >= 0)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t2
+#### A masked pattern was here ####
+POSTHOOK: query: select t1.c_int, t2.c_int from t1 full outer join t2 on t1.key=t2.key where (t1.c_int + t2.c_int == 2) and (t1.c_int > 0 or t2.c_float >= 0)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t2
+#### A masked pattern was here ####
[... long run of identical result rows: 1 1 ...]
+PREHOOK: query: select b, t1.c, t2.p, q, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or t2.q >= 0)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t2
+PREHOOK: Input: default@t3
+#### A masked pattern was here ####
+POSTHOOK: query: select b, t1.c, t2.p, q, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or t2.q >= 0)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t3
+#### A masked pattern was here ####
[... long run of identical result rows: 1 1.0 1 1 1 ...]
+PREHOOK: query: select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 left outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t2
+PREHOOK: Input: default@t3
+#### A masked pattern was here ####
+POSTHOOK: query: select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 left outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t3
+#### A masked pattern was here ####
[... long run of identical result rows: 1 1 1 1.0 1 ...]
+PREHOOK: query: select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 right outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t2
+PREHOOK: Input: default@t3
+#### A masked pattern was here ####
+POSTHOOK: query: select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 right outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t3
+#### A masked pattern was here ####
[... long run of identical result rows: 1 1 1 1.0 1 ...]
+PREHOOK: query: select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 full outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t2
+PREHOOK: Input: default@t3
+#### A masked pattern was here ####
+POSTHOOK: query: select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 full outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t3
+#### A masked pattern was here ####
[... long run of identical result rows: 1 1 1 1.0 1 ...]
+PREHOOK: query: select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 full outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t2
+PREHOOK: Input: default@t3
+#### A masked pattern was here ####
+POSTHOOK: query: select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 full outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t3
+#### A masked pattern was here ####
[... long run of identical result rows: 1 1 1 1.0 1 ...]
1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +PREHOOK: query: select * from (select c, b, a from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 left semi join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p left semi join t3 on t1.a=key where (b + 1 == 2) and (b > 0 or c >= 0)) R where (b + 1 = 2) and (R.b > 0 or c >= 0) +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t3 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select c, b, a from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 left semi join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p left semi join t3 on t1.a=key where (b + 1 == 2) and (b > 0 or c >= 0)) R where (b + 1 = 2) and (R.b > 0 or c >= 0) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t3 +#### A masked pattern was here #### +1.0 1 1 +1.0 1 1 +1.0 1 1 +1.0 1 1 +1.0 1 1 +1.0 1 1 +1.0 1 1 +1.0 1 1 +1.0 1 1 +1.0 1 1 +1.0 1 1 +1.0 1 1 +1.0 1 1 +1.0 1 1 +1.0 1 1 +1.0 1 1 +1.0 1 1 +1.0 1 1 +PREHOOK: query: select * from (select t3.c_int, t1.c, b from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 = 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 
left semi join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p left outer join t3 on t1.a=key where (b + t3.c_int == 2) and (b > 0 or c_int >= 0)) R where (R.c_int + 1 = 2) and (R.b > 0 or c_int >= 0) +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t3 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select t3.c_int, t1.c, b from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 = 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 left semi join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p left outer join t3 on t1.a=key where (b + t3.c_int == 2) and (b > 0 or c_int >= 0)) R where (R.c_int + 1 = 2) and (R.b > 0 or c_int >= 0) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t3 +#### A masked pattern was here #### +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +PREHOOK: query: select * from (select c_int, b, t1.c from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 left semi join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p right outer join t3 on t1.a=key where (b + 1 == 2) and (b > 0 or c_int >= 0)) R where (c + 1 = 2) and (R.b > 0 or c_int >= 0) +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t3 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select c_int, b, t1.c from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 left semi join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p right outer join t3 on t1.a=key where (b + 1 == 2) and (b > 0 or c_int >= 0)) R where (c + 1 = 2) and (R.b > 0 or c_int >= 0) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t3 +#### A masked pattern was here #### +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 
1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +PREHOOK: query: select * from (select c_int, b, t1.c from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 left semi join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p full outer join t3 on t1.a=key where (b + 1 == 2) and (b > 0 or c_int >= 0)) R where (c + 1 = 2) and (R.b > 0 or c_int >= 0) +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t3 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select c_int, b, t1.c from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 left semi join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p full outer join t3 on t1.a=key where (b + 1 == 2) and (b > 0 or c_int >= 0)) R where (c + 1 = 2) and (R.b > 0 or c_int >= 0) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t3 +#### A masked pattern was here #### +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +PREHOOK: query: select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 left outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p left outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0) +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t3 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 left outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p left outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t3 +#### A masked pattern was here #### +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 
1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 
1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +PREHOOK: query: select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 left outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p right outer join t3 on t1.a=key where (b + t2.q 
== 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0) +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t3 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 left outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p right outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t3 +#### A masked pattern was here #### +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 
1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 
1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +PREHOOK: query: select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 left outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p full outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0) +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t3 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 left outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p full outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t3 +#### A masked pattern was here #### +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 
1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 
1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +PREHOOK: query: select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 right outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p right outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0) +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t3 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 right outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p right outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t3 +#### A masked pattern was here #### +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 
1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 
+1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +PREHOOK: query: select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 right outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p left outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0) +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t3 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 right outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p left 
outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t3 +#### A masked pattern was here #### +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 
1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 
+1 1 1 1.0 1 +1 1 1 1.0 1 +PREHOOK: query: select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 right outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p full outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0) +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t3 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 right outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p full outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t3 +#### A masked pattern was here #### +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 
1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 
1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +PREHOOK: query: select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 full outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p full outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0) +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t3 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 full outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p full outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t3 +#### A masked pattern was here #### +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 
+1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 
1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +PREHOOK: query: select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 full outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p left outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0) +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t3 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 full outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p left outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t3 +#### A masked pattern was here #### +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 
1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 
1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +PREHOOK: query: select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 full outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p right outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0) +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t2 
+PREHOOK: Input: default@t3 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 full outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p right outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t3 +#### A masked pattern was here #### +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 
1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 
+1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +PREHOOK: query: -- 6. Test Select + TS + Join + Fil + GB + GB Having +select * from t1 group by c_int +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +#### A masked pattern was here #### +POSTHOOK: query: -- 6. Test Select + TS + Join + Fil + GB + GB Having +select * from t1 group by c_int +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +#### A masked pattern was here #### +NULL +1 +PREHOOK: query: select key, (c_int+1)+2 as x, sum(c_int) from t1 group by c_float, t1.c_int, key +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +#### A masked pattern was here #### +POSTHOOK: query: select key, (c_int+1)+2 as x, sum(c_int) from t1 group by c_float, t1.c_int, key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +#### A masked pattern was here #### +null NULL NULL + 1 4 2 + 1 4 2 +1 4 12 +1 4 2 +PREHOOK: query: select * from (select key, (c_int+c_float+1+2) as x, sum(c_int) as y from t1 group by c_float, t1.c_int, key) R group by y, x +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select key, (c_int+c_float+1+2) as x, sum(c_int) as y from t1 group by c_float, t1.c_int, key) R group by y, x +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +#### A masked pattern was here #### +NULL NULL +2 5.0 +12 5.0 +PREHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key order by a) t1 join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key order by q/10 desc, r asc) t2 on t1.a=p join t3 on t1.a=key where (b + t2.q >= 0) and (b > 0 or c_int >= 0) group by t3.c_int, c order by t3.c_int+c desc, c +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t3 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key order by a) t1 join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key order by q/10 desc, r asc) t2 on t1.a=p join t3 on t1.a=key where (b + t2.q >= 0) and (b > 0 or c_int >= 0) group by t3.c_int, c order by t3.c_int+c desc, c +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t3 +#### A masked pattern was here #### +1 12 +1 2 +PREHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key having t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by a+b desc, c asc) t1 left semi join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key having t2.c_float > 0 and 
(c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by q+r/10 desc, p) t2 on t1.a=p left semi join t3 on t1.a=key where (b + 1 >= 0) and (b > 0 or a >= 0) group by a, c having a > 0 and (a >=1 or c >= 1) and (a + c) >= 0 order by c, a +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t3 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key having t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by a+b desc, c asc) t1 left semi join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key having t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by q+r/10 desc, p) t2 on t1.a=p left semi join t3 on t1.a=key where (b + 1 >= 0) and (b > 0 or a >= 0) group by a, c having a > 0 and (a >=1 or c >= 1) and (a + c) >= 0 order by c, a +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t3 +#### A masked pattern was here #### + 1 2 + 1 2 +1 2 +1 12 +PREHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key having t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by b % c asc, b desc) t1 left outer join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key having t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0) t2 on t1.a=p left outer join t3 on t1.a=key where (b + t2.q >= 0) and (b > 0 or c_int >= 0) group by t3.c_int, c having t3.c_int > 0 and (c_int >=1 or c >= 1) and (c_int + c) >= 0 order by t3.c_int % c asc, t3.c_int desc +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t3 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key having t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by b % c asc, b desc) t1 left outer join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key having t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0) t2 on t1.a=p left outer join t3 on t1.a=key where (b + t2.q >= 0) and (b > 0 or c_int >= 0) group by t3.c_int, c having t3.c_int > 0 and (c_int >=1 or c >= 1) and (c_int + c) >= 0 order by t3.c_int % c asc, t3.c_int desc +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t3 +#### A masked pattern was here #### +1 12 +1 2 +PREHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key having t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by b+c, a desc) t1 right outer join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group 
by c_float, t2.c_int, key having t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0) t2 on t1.a=p right outer join t3 on t1.a=key where (b + t2.q >= 2) and (b > 0 or c_int >= 0) group by t3.c_int, c +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t3 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key having t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by b+c, a desc) t1 right outer join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key having t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0) t2 on t1.a=p right outer join t3 on t1.a=key where (b + t2.q >= 2) and (b > 0 or c_int >= 0) group by t3.c_int, c +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t3 +#### A masked pattern was here #### +1 2 +1 12 +PREHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key having t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by c+a desc) t1 full outer join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key having t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by p+q desc, r asc) t2 on t1.a=p full outer join t3 on t1.a=key where (b + t2.q >= 0) and (b > 0 or c_int >= 0) group by t3.c_int, c having t3.c_int > 0 and (c_int >=1 or c >= 1) and (c_int + c) >= 0 order by t3.c_int +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t3 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key having t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by c+a desc) t1 full outer join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key having t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by p+q desc, r asc) t2 on t1.a=p full outer join t3 on t1.a=key where (b + t2.q >= 0) and (b > 0 or c_int >= 0) group by t3.c_int, c having t3.c_int > 0 and (c_int >=1 or c >= 1) and (c_int + c) >= 0 order by t3.c_int +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t3 +#### A masked pattern was here #### +1 12 +1 2 +PREHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key having t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0) t1 join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key having t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0) t2 on t1.a=p join t3 on t1.a=key where (b + t2.q 
>= 0) and (b > 0 or c_int >= 0) group by t3.c_int, c +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t3 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key having t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0) t1 join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key having t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0) t2 on t1.a=p join t3 on t1.a=key where (b + t2.q >= 0) and (b > 0 or c_int >= 0) group by t3.c_int, c +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t3 +#### A masked pattern was here #### +1 2 +1 12 +PREHOOK: query: -- 7. Test Select + TS + Join + Fil + GB + GB Having + Limit +select * from t1 group by c_int limit 1 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +#### A masked pattern was here #### +POSTHOOK: query: -- 7. Test Select + TS + Join + Fil + GB + GB Having + Limit +select * from t1 group by c_int limit 1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +#### A masked pattern was here #### +NULL +PREHOOK: query: select key, (c_int+1)+2 as x, sum(c_int) from t1 group by c_float, t1.c_int, key order by x limit 1 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +#### A masked pattern was here #### +POSTHOOK: query: select key, (c_int+1)+2 as x, sum(c_int) from t1 group by c_float, t1.c_int, key order by x limit 1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +#### A masked pattern was here #### +null NULL NULL +PREHOOK: query: select * from (select key, (c_int+c_float+1+2) as x, sum(c_int) as y from t1 group by c_float, t1.c_int, key) R group by y, x order by x,y limit 1 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select key, (c_int+c_float+1+2) as x, sum(c_int) as y from t1 group by c_float, t1.c_int, key) R group by y, x order by x,y limit 1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +#### A masked pattern was here #### +NULL NULL +PREHOOK: query: select key from(select key from (select key from t1 limit 5)t2 limit 5)t3 limit 5 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +#### A masked pattern was here #### +POSTHOOK: query: select key from(select key from (select key from t1 limit 5)t2 limit 5)t3 limit 5 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +#### A masked pattern was here #### +1 +1 +1 +1 +1 +PREHOOK: query: select key, c_int from(select key, c_int from (select key, c_int from t1 order by c_int limit 5)t1 order by c_int limit 5)t2 order by c_int limit 5 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +#### A masked pattern was here #### +POSTHOOK: query: select key, c_int from(select key, c_int from (select key, c_int from t1 order by c_int limit 5)t1 order by c_int limit 5)t2 order by c_int limit 5 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +#### A masked pattern was here #### +null NULL +null NULL +1 1 +1 1 +1 1 +PREHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key order by a limit 5) t1 join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) 
and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key order by q/10 desc, r asc limit 5) t2 on t1.a=p join t3 on t1.a=key where (b + t2.q >= 0) and (b > 0 or c_int >= 0) group by t3.c_int, c order by t3.c_int+c desc, c limit 5 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t3 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key order by a limit 5) t1 join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key order by q/10 desc, r asc limit 5) t2 on t1.a=p join t3 on t1.a=key where (b + t2.q >= 0) and (b > 0 or c_int >= 0) group by t3.c_int, c order by t3.c_int+c desc, c limit 5 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t3 +#### A masked pattern was here #### +1 12 +1 2 +PREHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key having t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by a+b desc, c asc limit 5) t1 left semi join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key having t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by q+r/10 desc, p limit 5) t2 on t1.a=p left semi join t3 on t1.a=key where (b + 1 >= 0) and (b > 0 or a >= 0) group by a, c having a > 0 and (a >=1 or c >= 1) and (a + c) >= 0 order by c, a +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t3 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key having t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by a+b desc, c asc limit 5) t1 left semi join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key having t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by q+r/10 desc, p limit 5) t2 on t1.a=p left semi join t3 on t1.a=key where (b + 1 >= 0) and (b > 0 or a >= 0) group by a, c having a > 0 and (a >=1 or c >= 1) and (a + c) >= 0 order by c, a +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t3 +#### A masked pattern was here #### + 1 2 + 1 2 +1 2 +1 12 +PREHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key having t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by b % c asc, b desc limit 5) t1 left outer join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key having t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 limit 5) t2 on t1.a=p left outer join t3 on t1.a=key where (b + t2.q >= 0) and (b > 0 or c_int >= 0) group by 
t3.c_int, c having t3.c_int > 0 and (c_int >=1 or c >= 1) and (c_int + c) >= 0 order by t3.c_int % c asc, t3.c_int desc limit 5 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t3 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key having t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by b % c asc, b desc limit 5) t1 left outer join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key having t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 limit 5) t2 on t1.a=p left outer join t3 on t1.a=key where (b + t2.q >= 0) and (b > 0 or c_int >= 0) group by t3.c_int, c having t3.c_int > 0 and (c_int >=1 or c >= 1) and (c_int + c) >= 0 order by t3.c_int % c asc, t3.c_int desc limit 5 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t3 +#### A masked pattern was here #### +1 12 +1 2 +PREHOOK: query: -- 8. Test UDAF +select count(*), count(c_int), sum(c_int), avg(c_int), max(c_int), min(c_int) from t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +#### A masked pattern was here #### +POSTHOOK: query: -- 8. Test UDAF +select count(*), count(c_int), sum(c_int), avg(c_int), max(c_int), min(c_int) from t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +#### A masked pattern was here #### +20 18 18 1.0 1 1 +PREHOOK: query: select * from (select count(*) as a, count(distinct c_int) as b, sum(c_int) as c, avg(c_int) as d, max(c_int) as e, min(c_int) as f from t1) t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select count(*) as a, count(distinct c_int) as b, sum(c_int) as c, avg(c_int) as d, max(c_int) as e, min(c_int) as f from t1) t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +#### A masked pattern was here #### +20 1 18 1.0 1 1 +PREHOOK: query: select f,a,e,b from (select count(*) as a, count(c_int) as b, sum(c_int) as c, avg(c_int) as d, max(c_int) as e, min(c_int) as f from t1) t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +#### A masked pattern was here #### +POSTHOOK: query: select f,a,e,b from (select count(*) as a, count(c_int) as b, sum(c_int) as c, avg(c_int) as d, max(c_int) as e, min(c_int) as f from t1) t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +#### A masked pattern was here #### +1 20 1 18 +PREHOOK: query: select f,a,e,b from (select count(*) as a, count(distinct c_int) as b, sum(distinct c_int) as c, avg(distinct c_int) as d, max(distinct c_int) as e, min(distinct c_int) as f from t1) t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +#### A masked pattern was here #### +POSTHOOK: query: select f,a,e,b from (select count(*) as a, count(distinct c_int) as b, sum(distinct c_int) as c, avg(distinct c_int) as d, max(distinct c_int) as e, min(distinct c_int) as f from t1) t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +#### A masked pattern was here #### +1 20 1 1 +PREHOOK: query: select count(c_int) as a, avg(c_float), key from t1 group by key +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +#### A masked pattern was here #### +POSTHOOK: query: select count(c_int) as a, avg(c_float), key from t1 group by key +POSTHOOK: type: QUERY +POSTHOOK: 
Input: default@t1 +#### A masked pattern was here #### +2 1.0 1 +2 1.0 1 +12 1.0 1 +2 1.0 1 +0 NULL null +PREHOOK: query: select count(distinct c_int) as a, avg(c_float) from t1 group by c_float +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +#### A masked pattern was here #### +POSTHOOK: query: select count(distinct c_int) as a, avg(c_float) from t1 group by c_float +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +#### A masked pattern was here #### +0 NULL +1 1.0 +PREHOOK: query: select count(distinct c_int) as a, avg(c_float) from t1 group by c_int +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +#### A masked pattern was here #### +POSTHOOK: query: select count(distinct c_int) as a, avg(c_float) from t1 group by c_int +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +#### A masked pattern was here #### +0 NULL +1 1.0 +PREHOOK: query: select count(distinct c_int) as a, avg(c_float) from t1 group by c_float, c_int +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +#### A masked pattern was here #### +POSTHOOK: query: select count(distinct c_int) as a, avg(c_float) from t1 group by c_float, c_int +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +#### A masked pattern was here #### +0 NULL +1 1.0 +PREHOOK: query: -- 9. Test Windowing Functions +select count(c_int) over() from t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +#### A masked pattern was here #### +POSTHOOK: query: -- 9. Test Windowing Functions +select count(c_int) over() from t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +#### A masked pattern was here #### +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +PREHOOK: query: select * from (select count(c_int) over() from t1) t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select count(c_int) over() from t1) t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +#### A masked pattern was here #### +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +PREHOOK: query: select count(c_int) over(), sum(c_float) over() from t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +#### A masked pattern was here #### +POSTHOOK: query: select count(c_int) over(), sum(c_float) over() from t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +#### A masked pattern was here #### +18 18.0 +18 18.0 +18 18.0 +18 18.0 +18 18.0 +18 18.0 +18 18.0 +18 18.0 +18 18.0 +18 18.0 +18 18.0 +18 18.0 +18 18.0 +18 18.0 +18 18.0 +18 18.0 +18 18.0 +18 18.0 +18 18.0 +18 18.0 +PREHOOK: query: select x from (select count(c_int) over() as x, sum(c_float) over() from t1) t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +#### A masked pattern was here #### +POSTHOOK: query: select x from (select count(c_int) over() as x, sum(c_float) over() from t1) t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +#### A masked pattern was here #### +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +PREHOOK: query: select * from (select max(c_int) over (partition by key order by value Rows UNBOUNDED PRECEDING), min(c_int) over (partition by key order by value rows current row), count(c_int) over(partition by key order by value ROWS 1 PRECEDING), avg(value) over (partition by key order by value Rows between unbounded preceding and unbounded following), sum(value) over (partition by key order by value rows between unbounded preceding and current row), avg(c_float) over (partition by key order by value Rows between 1 preceding and 
unbounded following), sum(c_float) over (partition by key order by value rows between 1 preceding and current row), max(c_float) over (partition by key order by value rows between 1 preceding and unbounded following), min(c_float) over (partition by key order by value rows between 1 preceding and 1 following) from t1) t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select max(c_int) over (partition by key order by value Rows UNBOUNDED PRECEDING), min(c_int) over (partition by key order by value rows current row), count(c_int) over(partition by key order by value ROWS 1 PRECEDING), avg(value) over (partition by key order by value Rows between unbounded preceding and unbounded following), sum(value) over (partition by key order by value rows between unbounded preceding and current row), avg(c_float) over (partition by key order by value Rows between 1 preceding and unbounded following), sum(c_float) over (partition by key order by value rows between 1 preceding and current row), max(c_float) over (partition by key order by value rows between 1 preceding and unbounded following), min(c_float) over (partition by key order by value rows between 1 preceding and 1 following) from t1) t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +#### A masked pattern was here #### +1 1 1 1.0 1.0 1.0 1.0 1.0 1.0 +1 1 2 1.0 2.0 1.0 2.0 1.0 1.0 +1 1 1 1.0 1.0 1.0 1.0 1.0 1.0 +1 1 2 1.0 2.0 1.0 2.0 1.0 1.0 +1 1 1 1.0 1.0 1.0 1.0 1.0 1.0 +1 1 2 1.0 2.0 1.0 2.0 1.0 1.0 +1 1 2 1.0 3.0 1.0 2.0 1.0 1.0 +1 1 2 1.0 4.0 1.0 2.0 1.0 1.0 +1 1 2 1.0 5.0 1.0 2.0 1.0 1.0 +1 1 2 1.0 6.0 1.0 2.0 1.0 1.0 +1 1 2 1.0 7.0 1.0 2.0 1.0 1.0 +1 1 2 1.0 8.0 1.0 2.0 1.0 1.0 +1 1 2 1.0 9.0 1.0 2.0 1.0 1.0 +1 1 2 1.0 10.0 1.0 2.0 1.0 1.0 +1 1 2 1.0 11.0 1.0 2.0 1.0 1.0 +1 1 2 1.0 12.0 1.0 2.0 1.0 1.0 +1 1 1 1.0 1.0 1.0 1.0 1.0 1.0 +1 1 2 1.0 2.0 1.0 2.0 1.0 1.0 +NULL NULL 0 NULL 0.0 NULL NULL NULL NULL +NULL NULL 0 NULL 0.0 NULL NULL NULL NULL +PREHOOK: query: select i, a, h, b, c, d, e, f, g, a as x, a +1 as y from (select max(c_int) over (partition by key order by value range UNBOUNDED PRECEDING) a, min(c_int) over (partition by key order by value range current row) b, count(c_int) over(partition by key order by value range 1 PRECEDING) c, avg(value) over (partition by key order by value range between unbounded preceding and unbounded following) d, sum(value) over (partition by key order by value range between unbounded preceding and current row) e, avg(c_float) over (partition by key order by value range between 1 preceding and unbounded following) f, sum(c_float) over (partition by key order by value range between 1 preceding and current row) g, max(c_float) over (partition by key order by value range between 1 preceding and unbounded following) h, min(c_float) over (partition by key order by value range between 1 preceding and 1 following) i from t1) t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +#### A masked pattern was here #### +POSTHOOK: query: select i, a, h, b, c, d, e, f, g, a as x, a +1 as y from (select max(c_int) over (partition by key order by value range UNBOUNDED PRECEDING) a, min(c_int) over (partition by key order by value range current row) b, count(c_int) over(partition by key order by value range 1 PRECEDING) c, avg(value) over (partition by key order by value range between unbounded preceding and unbounded following) d, sum(value) over (partition by key order by value range between unbounded preceding and current row) e, 
avg(c_float) over (partition by key order by value range between 1 preceding and unbounded following) f, sum(c_float) over (partition by key order by value range between 1 preceding and current row) g, max(c_float) over (partition by key order by value range between 1 preceding and unbounded following) h, min(c_float) over (partition by key order by value range between 1 preceding and 1 following) i from t1) t1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+#### A masked pattern was here ####
+1.0 1 1.0 1 2 1.0 2.0 1.0 2.0 1 2
+1.0 1 1.0 1 2 1.0 2.0 1.0 2.0 1 2
+1.0 1 1.0 1 2 1.0 2.0 1.0 2.0 1 2
+1.0 1 1.0 1 2 1.0 2.0 1.0 2.0 1 2
+1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2
+1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2
+1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2
+1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2
+1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2
+1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2
+1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2
+1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2
+1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2
+1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2
+1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2
+1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2
+1.0 1 1.0 1 2 1.0 2.0 1.0 2.0 1 2
+1.0 1 1.0 1 2 1.0 2.0 1.0 2.0 1 2
+NULL NULL NULL NULL 0 NULL 0.0 NULL NULL NULL NULL
+NULL NULL NULL NULL 0 NULL 0.0 NULL NULL NULL NULL
diff --git a/ql/src/test/results/clientpositive/tez/cross_product_check_2.q.out b/ql/src/test/results/clientpositive/tez/cross_product_check_2.q.out
index 21e1a6d..66d287a 100644
--- a/ql/src/test/results/clientpositive/tez/cross_product_check_2.q.out
+++ b/ql/src/test/results/clientpositive/tez/cross_product_check_2.q.out
@@ -64,6 +64,8 @@ STAGE PLANS:
                   0 
                   1 
                 outputColumnNames: _col0, _col1, _col5, _col6
+                input vertices:
+                  1 Map 1
                 Statistics: Num rows: 31 Data size: 6393 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
                   expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string)
@@ -118,6 +120,8 @@ STAGE PLANS:
                   0 key (type: string)
                   1 key (type: string)
                 outputColumnNames: _col0, _col1, _col5, _col6
+                input vertices:
+                  1 Map 2
                 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                 Reduce Output Operator
                   sort order: 
@@ -152,6 +156,8 @@ STAGE PLANS:
                   0 
                   1 
                 outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11
+                input vertices:
+                  0 Map 1
                 Statistics: Num rows: 31 Data size: 6393 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
                   expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string), _col10 (type: string), _col11 (type: string)
@@ -213,6 +219,8 @@ STAGE PLANS:
                   0 key (type: string)
                   1 key (type: string)
                 outputColumnNames: _col0
+                input vertices:
+                  1 Map 3
                 Statistics: Num rows: 1 Data size: 125 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
                   expressions: _col0 (type: string)
@@ -256,6 +264,8 @@ STAGE PLANS:
                   0 
                   1 
                 outputColumnNames: _col0, _col1, _col5
+                input vertices:
+                  1 Reducer 2
                 Statistics: Num rows: 31 Data size: 6393 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
                   expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string)
@@ -324,6 +334,8 @@ STAGE PLANS:
                   0 
                   1 
                 outputColumnNames: _col0
+                input vertices:
+                  1 Map 3
                 Statistics: Num rows: 1 Data size: 125 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
                   expressions: _col0 (type: string)
@@ -362,6 +374,8 @@ STAGE PLANS:
                   0 
                   1 
                 outputColumnNames: _col0, _col1, _col5
+                input vertices:
+                  1 Reducer 2
                 Statistics: Num rows: 31 Data size: 6393 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
                   expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string)
@@ -436,6 +450,8 @@ STAGE PLANS:
                   0 key (type: string)
                   1 key (type: string)
                 outputColumnNames: _col0
+                input vertices:
+                  1 Map 3
                 Statistics: Num rows: 1 Data size: 125 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
                   expressions: _col0 (type: string)
@@ -519,6 +535,8 @@ STAGE PLANS:
                   0 
                   1 
                 outputColumnNames: _col0, _col1
+                input vertices:
+                  1 Reducer 2
                 Statistics: Num rows: 31 Data size: 3196 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
                   expressions: _col0 (type: string), _col1 (type: string)
diff --git a/ql/src/test/results/clientpositive/tez/mapjoin_decimal.q.out b/ql/src/test/results/clientpositive/tez/mapjoin_decimal.q.out
index a7d6742..a2f677d 100644
--- a/ql/src/test/results/clientpositive/tez/mapjoin_decimal.q.out
+++ b/ql/src/test/results/clientpositive/tez/mapjoin_decimal.q.out
@@ -120,6 +120,8 @@ STAGE PLANS:
                   0 dec (type: decimal(4,2))
                   1 dec (type: decimal(4,0))
                 outputColumnNames: _col0, _col4
+                input vertices:
+                  1 Map 1
                 Statistics: Num rows: 577 Data size: 64680 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
                   expressions: _col0 (type: decimal(4,2)), _col4 (type: decimal(4,0))
diff --git a/ql/src/test/results/clientpositive/tez/mapjoin_mapjoin.q.out b/ql/src/test/results/clientpositive/tez/mapjoin_mapjoin.q.out
index 5dfbfcd..0a302a4 100644
--- a/ql/src/test/results/clientpositive/tez/mapjoin_mapjoin.q.out
+++ b/ql/src/test/results/clientpositive/tez/mapjoin_mapjoin.q.out
@@ -83,6 +83,8 @@ STAGE PLANS:
                   0 value (type: string)
                   1 value (type: string)
                 outputColumnNames: _col0
+                input vertices:
+                  1 Map 3
                 Position of Big Table: 0
                 Statistics: Num rows: 31 Data size: 6393 Basic stats: COMPLETE Column stats: NONE
                 Map Join Operator
@@ -96,6 +98,8 @@ STAGE PLANS:
                   0 _col0 (type: string)
                   1 key (type: string)
                 outputColumnNames: _col0
+                input vertices:
+                  1 Map 2
                 Position of Big Table: 0
                 Statistics: Num rows: 34 Data size: 7032 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
@@ -489,6 +493,8 @@ STAGE PLANS:
                   0 value (type: string)
                   1 value (type: string)
                 outputColumnNames: _col0
+                input vertices:
+                  1 Map 3
                 Statistics: Num rows: 9 Data size: 1983 Basic stats: COMPLETE Column stats: NONE
                 Map Join Operator
                   condition map:
@@ -500,6 +506,8 @@ STAGE PLANS:
                   0 _col0 (type: string)
                   1 key (type: string)
                 outputColumnNames: _col0
+                input vertices:
+                  1 Map 2
                 Statistics: Num rows: 9 Data size: 2181 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
                   expressions: _col0 (type: string)
@@ -581,6 +589,8 @@ STAGE PLANS:
                   0 value (type: string)
                   1 value (type: string)
                 outputColumnNames: _col0, _col2
+                input vertices:
+                  1 Map 4
                 Statistics: Num rows: 31 Data size: 6393 Basic stats: COMPLETE Column stats: NONE
                 Map Join Operator
                   condition map:
@@ -592,6 +602,8 @@ STAGE PLANS:
                   0 _col0 (type: string)
                   1 key (type: string)
                 outputColumnNames: _col2
+                input vertices:
+                  1 Map 3
                 Statistics: Num rows: 34 Data size: 7032 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
                   expressions: _col2 (type: string)
diff --git a/ql/src/test/results/clientpositive/tez/metadataonly1.q.out b/ql/src/test/results/clientpositive/tez/metadataonly1.q.out
index 0f8f3af..4c75b90 100644
--- a/ql/src/test/results/clientpositive/tez/metadataonly1.q.out
+++ b/ql/src/test/results/clientpositive/tez/metadataonly1.q.out
@@ -1207,7 +1207,7 @@ POSTHOOK: Input: default@test2@ds=1/hr=1
 POSTHOOK: Input: default@test2@ds=1/hr=2
 POSTHOOK: Input: default@test2@ds=1/hr=3
 #### A masked pattern was here ####
-1 3
+1 1
 PREHOOK: query: explain extended select ds, count(hr) from TEST2 group by ds
 PREHOOK: type: QUERY
 POSTHOOK: query: explain extended select ds, count(hr) from TEST2 group by ds
@@ -1669,7 +1669,6 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
 POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
 #### A masked pattern was here ####
 2008-04-08
-2008-04-09
 PREHOOK: query: select min(ds),max(ds) from srcpart
 PREHOOK: type: QUERY
 PREHOOK: Input: default@srcpart
@@ -1686,7 +1685,7 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
 POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
 POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
 #### A masked pattern was here ####
-2008-04-08 2008-04-09
+2008-04-08 2008-04-08
 PREHOOK: query: -- HIVE-3594 URI encoding for temporary path
 alter table TEST2 add partition (ds='01:10:10', hr='01')
 PREHOOK: type: ALTERTABLE_ADDPARTS
@@ -2044,5 +2043,3 @@ POSTHOOK: Input: default@test2@ds=1/hr=2
 POSTHOOK: Input: default@test2@ds=1/hr=3
 #### A masked pattern was here ####
 01:10:10 1
-01:10:20 1
-1 3
diff --git a/ql/src/test/results/clientpositive/tez/mrr.q.out b/ql/src/test/results/clientpositive/tez/mrr.q.out
index 2d4a6cf..d186f8b 100644
--- a/ql/src/test/results/clientpositive/tez/mrr.q.out
+++ b/ql/src/test/results/clientpositive/tez/mrr.q.out
@@ -853,6 +853,8 @@ STAGE PLANS:
                   0 key (type: string)
                   1 key (type: string)
                 outputColumnNames: _col5, _col6
+                input vertices:
+                  0 Map 4
                 Statistics: Num rows: 31 Data size: 3196 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
                   expressions: _col5 (type: string), _col6 (type: string)
@@ -1705,6 +1707,8 @@ STAGE PLANS:
                   0 _col0 (type: string)
                   1 key (type: string)
                 outputColumnNames: _col0, _col1, _col2, _col3
+                input vertices:
+                  0 Reducer 3
                 Statistics: Num rows: 16 Data size: 3306 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
                   expressions: _col0 (type: string), _col1 (type: bigint), _col2 (type: string), _col3 (type: string)
diff --git a/ql/src/test/results/clientpositive/tez/tez_union.q.out b/ql/src/test/results/clientpositive/tez/tez_union.q.out
index 7f94016..e0a465a 100644
--- a/ql/src/test/results/clientpositive/tez/tez_union.q.out
+++ b/ql/src/test/results/clientpositive/tez/tez_union.q.out
@@ -36,6 +36,8 @@ STAGE PLANS:
                   0 key (type: string)
                   1 key (type: string)
                 outputColumnNames: _col0, _col1
+                input vertices:
+                  1 Map 3
                 Select Operator
                   expressions: _col0 (type: string), _col1 (type: string)
                   outputColumnNames: _col0, _col1
@@ -355,6 +357,8 @@ STAGE PLANS:
                   0 key (type: string)
                   1 _col0 (type: string)
                 outputColumnNames: _col0, _col5
+                input vertices:
+                  0 Map 1
                 Select Operator
                   expressions: _col0 (type: string), _col5 (type: string)
                   outputColumnNames: _col0, _col1
@@ -383,6 +387,8 @@ STAGE PLANS:
                   0 key (type: string)
                   1 _col0 (type: string)
                 outputColumnNames: _col0, _col5
+                input vertices:
+                  0 Map 1
                 Select Operator
                   expressions: _col0 (type: string), _col5 (type: string)
                   outputColumnNames: _col0, _col1
@@ -535,6 +541,8 @@ STAGE PLANS:
                   0 key (type: string)
                   1 key (type: string)
                 outputColumnNames: _col0
+                input vertices:
+                  1 Map 5
                 Select Operator
                   expressions: _col0 (type: string)
                   outputColumnNames: _col0
@@ -551,6 +559,9 @@ STAGE PLANS:
                   1 _col0 (type: string)
                   2 key (type: string)
                 outputColumnNames: _col0, _col5, _col6
+                input vertices:
+                  0 Map 1
+                  2 Map 8
                 Select Operator
                   expressions: _col0 (type: string), _col5 (type: string), _col6 (type: string)
                   outputColumnNames: _col0, _col1, _col2
@@ -600,6 +611,8 @@ STAGE PLANS:
                   0 key (type: string)
                   1 key (type: string)
                 outputColumnNames: _col0
+                input vertices:
+                  1 Map 6
                 Select Operator
                   expressions: _col0 (type: string)
                   outputColumnNames: _col0
@@ -616,6 +629,9 @@ STAGE PLANS:
                   1 _col0 (type: string)
                   2 key (type: string)
                 outputColumnNames: _col0, _col5, _col6
+                input vertices:
+                  0 Map 1
+                  2 Map 8
                 Select Operator
                   expressions: _col0 (type: string), _col5 (type: string), _col6 (type: string)
                   outputColumnNames: _col0, _col1, _col2
@@ -662,6 +678,8 @@ STAGE PLANS:
                   0 key (type: string)
                   1 key (type: string)
                 outputColumnNames: _col0
+                input vertices:
+                  1 Map 10
                 Select Operator
                   expressions: _col0 (type: string)
                   outputColumnNames: _col0
@@ -678,6 +696,9 @@ STAGE PLANS:
                   1 _col0 (type: string)
                   2 key (type: string)
                 outputColumnNames: _col0, _col5, _col6
+                input vertices:
+                  0 Map 1
+                  2 Map 8
                 Select Operator
                   expressions: _col0 (type: string), _col5 (type: string), _col6 (type: string)
                   outputColumnNames: _col0, _col1, _col2
@@ -883,6 +904,8 @@ STAGE PLANS:
                   0 key (type: string)
                   1 key (type: string)
                 outputColumnNames: _col0
+                input vertices:
+                  1 Map 3
                 Select Operator
                   expressions: _col0 (type: string)
                   outputColumnNames: _col0
@@ -924,6 +947,8 @@ STAGE PLANS:
                   0 key (type: string)
                   1 key (type: string)
                 outputColumnNames: _col0
+                input vertices:
+                  1 Map 5
                 Select Operator
                   expressions: _col0 (type: string)
                   outputColumnNames: _col0
@@ -1057,6 +1082,8 @@ STAGE PLANS:
                   0 _col0 (type: string)
                   1 key (type: string)
                 outputColumnNames: _col0, _col1, _col2, _col3
+                input vertices:
+                  1 Map 3
                 Select Operator
                   expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: string)
                   outputColumnNames: _col0, _col1, _col2, _col3
@@ -1100,6 +1127,8 @@ STAGE PLANS:
                   0 _col0 (type: string)
                   1 key (type: string)
                 outputColumnNames: _col0, _col1, _col2, _col3
+                input vertices:
+                  1 Map 3
                 Select Operator
                   expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: string)
                   outputColumnNames: _col0, _col1, _col2, _col3
@@ -1167,6 +1196,8 @@ STAGE PLANS:
                   0 _col0 (type: string)
                   1 key (type: string)
                 outputColumnNames: _col0, _col2
+                input vertices:
+                  0 Union 2
                 Statistics: Num rows: 127 Data size: 12786 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
                   expressions: _col0 (type: string), _col2 (type: string)