Index: . =================================================================== --- . (revision 1637277) +++ . (working copy) Property changes on: . ___________________________________________________________________ Modified: svn:mergeinfo Merged /hive/trunk:r1635378-1636884,1636886-1636887,1636889-1637277 Index: cli/src/test/org/apache/hadoop/hive/cli/TestCliDriverMethods.java =================================================================== --- cli/src/test/org/apache/hadoop/hive/cli/TestCliDriverMethods.java (revision 1637277) +++ cli/src/test/org/apache/hadoop/hive/cli/TestCliDriverMethods.java (working copy) @@ -58,6 +58,7 @@ import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse; import org.apache.hadoop.hive.service.HiveClient; import org.apache.hadoop.hive.service.HiveServerException; +import org.apache.hadoop.util.Shell; import org.apache.thrift.TException; @@ -375,8 +376,14 @@ } } + private static void setEnv(String key, String value) throws Exception { + if (Shell.WINDOWS) + setEnvWindows(key, value); + else + setEnvLinux(key, value); + } - private static void setEnv(String key, String value) throws Exception { + private static void setEnvLinux(String key, String value) throws Exception { Class[] classes = Collections.class.getDeclaredClasses(); Map env = (Map) System.getenv(); for (Class cl : classes) { @@ -394,7 +401,27 @@ } } + private static void setEnvWindows(String key, String value) throws Exception { + Class processEnvironmentClass = Class.forName("java.lang.ProcessEnvironment"); + Field theEnvironmentField = processEnvironmentClass.getDeclaredField("theEnvironment"); + theEnvironmentField.setAccessible(true); + Map env = (Map) theEnvironmentField.get(null); + if (value == null) { + env.remove(key); + } else { + env.put(key, value); + } + Field theCaseInsensitiveEnvironmentField = processEnvironmentClass.getDeclaredField("theCaseInsensitiveEnvironment"); + theCaseInsensitiveEnvironmentField.setAccessible(true); + Map cienv = (Map) theCaseInsensitiveEnvironmentField.get(null); + if (value == null) { + cienv.remove(key); + } else { + cienv.put(key, value); + } + } + private static class FakeCliDriver extends CliDriver { @Override Index: common/pom.xml =================================================================== --- common/pom.xml (revision 1637277) +++ common/pom.xml (working copy) @@ -118,14 +118,6 @@ dist - - - ../conf/ - - hive-default.xml.template - - - org.apache.maven.plugins Index: common/src/java/org/apache/hadoop/hive/common/type/HiveDecimal.java =================================================================== --- common/src/java/org/apache/hadoop/hive/common/type/HiveDecimal.java (revision 1637277) +++ common/src/java/org/apache/hadoop/hive/common/type/HiveDecimal.java (working copy) @@ -266,4 +266,17 @@ return bd; } + + public static HiveDecimal enforcePrecisionScale(HiveDecimal dec, int maxPrecision, int maxScale) { + if (dec == null) { + return null; + } + + BigDecimal bd = enforcePrecisionScale(dec.bd, maxPrecision, maxScale); + if (bd == null) { + return null; + } + + return HiveDecimal.create(bd); + } } Index: common/src/java/org/apache/hadoop/hive/conf/HiveConf.java =================================================================== --- common/src/java/org/apache/hadoop/hive/conf/HiveConf.java (revision 1637277) +++ common/src/java/org/apache/hadoop/hive/conf/HiveConf.java (working copy) @@ -393,9 +393,11 @@ "the connection URL, before the next metastore query that accesses the\n" + "datastore. 
Once reloaded, this value is reset to false. Used for\n" + "testing only."), + METASTORESERVERMAXMESSAGESIZE("hive.metastore.server.max.message.size", 100*1024*1024, + "Maximum message size in bytes a HMS will accept."), METASTORESERVERMINTHREADS("hive.metastore.server.min.threads", 200, "Minimum number of worker threads in the Thrift server's pool."), - METASTORESERVERMAXTHREADS("hive.metastore.server.max.threads", 100000, + METASTORESERVERMAXTHREADS("hive.metastore.server.max.threads", 1000, "Maximum number of worker threads in the Thrift server's pool."), METASTORE_TCP_KEEP_ALIVE("hive.metastore.server.tcp.keepalive", true, "Whether to enable TCP keepalive for the metastore server. Keepalive will prevent accumulation of half-open connections."), @@ -1597,6 +1599,9 @@ "table. From 0.12 onwards, they are displayed separately. This flag will let you\n" + "get old behavior, if desired. See, test-case in patch for HIVE-6689."), + HIVE_SSL_PROTOCOL_BLACKLIST("hive.ssl.protocol.blacklist", "SSLv2,SSLv2Hello,SSLv3", + "SSL Versions to disable for all Hive Servers"), + // HiveServer2 specific configs HIVE_SERVER2_MAX_START_ATTEMPTS("hive.server2.max.start.attempts", 30L, new RangeValidator(0L, null), "Number of times HiveServer2 will attempt to start before exiting, sleeping 60 seconds " + @@ -1622,6 +1627,8 @@ "Port number of HiveServer2 Thrift interface when hive.server2.transport.mode is 'http'."), HIVE_SERVER2_THRIFT_HTTP_PATH("hive.server2.thrift.http.path", "cliservice", "Path component of URL endpoint when in HTTP mode."), + HIVE_SERVER2_THRIFT_MAX_MESSAGE_SIZE("hive.server2.thrift.max.message.size", 100*1024*1024, + "Maximum message size in bytes a HS2 server will accept."), HIVE_SERVER2_THRIFT_HTTP_MIN_WORKER_THREADS("hive.server2.thrift.http.min.worker.threads", 5, "Minimum number of worker threads when in HTTP mode."), HIVE_SERVER2_THRIFT_HTTP_MAX_WORKER_THREADS("hive.server2.thrift.http.max.worker.threads", 500, @@ -1922,7 +1929,15 @@ TEZ_SMB_NUMBER_WAVES( "hive.tez.smb.number.waves", (float) 0.5, - "The number of waves in which to run the SMB join. Account for cluster being occupied. Ideally should be 1 wave.") + "The number of waves in which to run the SMB join. Account for cluster being occupied. 
Ideally should be 1 wave."),
+    TEZ_EXEC_SUMMARY(
+        "hive.tez.exec.print.summary",
+        false,
+        "Display a breakdown of execution steps for every query executed by the shell."),
+    TEZ_EXEC_INPLACE_PROGRESS(
+        "hive.tez.exec.inplace.progress",
+        true,
+        "Updates Tez job execution progress in-place in the terminal.")
     ;
   public final String varname;
@@ -2588,6 +2603,7 @@
     "hive\\.auto\\..*",
     "hive\\.cbo\\..*",
     "hive\\.convert\\..*",
+    "hive\\.exec\\.dynamic\\.partition.*",
     "hive\\.exec\\..*\\.dynamic\\.partitions\\..*",
     "hive\\.exec\\.compress\\..*",
     "hive\\.exec\\.infer\\..*",
Index: common/src/test/org/apache/hadoop/hive/conf/TestHiveLogging.java
===================================================================
--- common/src/test/org/apache/hadoop/hive/conf/TestHiveLogging.java	(revision 1637277)
+++ common/src/test/org/apache/hadoop/hive/conf/TestHiveLogging.java	(working copy)
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hive.conf;
 import java.io.BufferedReader;
+import java.io.File;
 import java.io.InputStreamReader;
 import junit.framework.TestCase;
@@ -75,31 +76,37 @@
     assertEquals(true, logCreated);
   }
-  private void RunTest(String cleanCmd, String findCmd, String logFile,
+  public void cleanLog(File logFile) {
+    if (logFile.exists()) {
+      logFile.delete();
+    }
+    File logFileDir = logFile.getParentFile();
+    if (logFileDir.exists()) {
+      logFileDir.delete();
+    }
+  }
+
+  private void RunTest(File logFile,
       String hiveLog4jProperty, String hiveExecLog4jProperty) throws Exception {
     // clean test space
-    runCmd(cleanCmd);
+    cleanLog(logFile);
+    assertFalse(logFile + " should not exist", logFile.exists());
     // config log4j with customized files
     // check whether HiveConf initialize log4j correctly
     configLog(hiveLog4jProperty, hiveExecLog4jProperty);
     // check whether log file is created on test running
-    runCmd(findCmd);
-    getCmdOutput(logFile);
-
-    // clean test space
-    runCmd(cleanCmd);
+    assertTrue(logFile + " should exist", logFile.exists());
   }
   public void testHiveLogging() throws Exception {
-    // customized log4j config log file to be: /tmp/TestHiveLogging/hiveLog4jTest.log
-    String customLogPath = "/tmp/" + System.getProperty("user.name") + "-TestHiveLogging/";
+    // customized log4j config log file to be: ${test.tmp.dir}/${user.name}-TestHiveLogging/hiveLog4jTest.log
+    File customLogPath = new File(new File(System.getProperty("test.tmp.dir")),
+        System.getProperty("user.name") + "-TestHiveLogging/");
     String customLogName = "hiveLog4jTest.log";
-    String customLogFile = customLogPath + customLogName;
-    String customCleanCmd = "rm -rf " + customLogFile;
-    String customFindCmd = "find " + customLogPath + " -name " + customLogName;
-    RunTest(customCleanCmd, customFindCmd, customLogFile,
+    File customLogFile = new File(customLogPath, customLogName);
+    RunTest(customLogFile,
         "hive-log4j-test.properties", "hive-exec-log4j-test.properties");
   }
 }
Index: common/src/test/resources/hive-exec-log4j-test.properties
===================================================================
--- common/src/test/resources/hive-exec-log4j-test.properties	(revision 1637277)
+++ common/src/test/resources/hive-exec-log4j-test.properties	(working copy)
@@ -1,6 +1,6 @@
 # Define some default values that can be overridden by system properties
 hive.root.logger=INFO,FA
-hive.log.dir=/tmp/${user.name}-TestHiveLogging
+hive.log.dir=${test.tmp.dir}/${user.name}-TestHiveLogging
 hive.log.file=hiveExecLog4jTest.log
 # Define the root logger to the system property "hadoop.root.logger".
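The TestHiveLogging rewrite above replaces the old shell-based clean/verify steps (`rm -rf`, `find`) with `java.io.File` operations rooted at `test.tmp.dir`, so the test also works on Windows. A minimal, self-contained sketch of that pattern follows; the `LogFileCheck` class name and the `java.io.tmpdir` fallback are illustrative, not part of the patch:

```java
import java.io.File;

public class LogFileCheck {

  // Delete the log file, then its parent directory (File.delete() on a
  // directory only succeeds once the directory is empty).
  static void cleanLog(File logFile) {
    if (logFile.exists()) {
      logFile.delete();
    }
    File dir = logFile.getParentFile();
    if (dir != null && dir.exists()) {
      dir.delete();
    }
  }

  public static void main(String[] args) {
    // Resolve the log location the same way the test does, under test.tmp.dir.
    File base = new File(System.getProperty("test.tmp.dir",
        System.getProperty("java.io.tmpdir")));
    File logFile = new File(
        new File(base, System.getProperty("user.name") + "-TestHiveLogging"),
        "hiveLog4jTest.log");

    cleanLog(logFile);
    if (logFile.exists()) {
      throw new AssertionError(logFile + " should not exist");
    }
    // A real test would now initialize log4j from the test properties,
    // log a line, and assert that logFile.exists().
  }
}
```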
Index: common/src/test/resources/hive-log4j-test.properties =================================================================== --- common/src/test/resources/hive-log4j-test.properties (revision 1637277) +++ common/src/test/resources/hive-log4j-test.properties (working copy) @@ -1,6 +1,6 @@ # Define some default values that can be overridden by system properties hive.root.logger=WARN,DRFA -hive.log.dir=/tmp/${user.name}-TestHiveLogging +hive.log.dir=${test.tmp.dir}/${user.name}-TestHiveLogging hive.log.file=hiveLog4jTest.log # Define the root logger to the system property "hadoop.root.logger". Index: data/conf/fair-scheduler-test.xml =================================================================== --- data/conf/fair-scheduler-test.xml (revision 0) +++ data/conf/fair-scheduler-test.xml (working copy) @@ -0,0 +1,16 @@ + + + + * + + + * + + + + + + + + + Index: data/scripts/q_test_cleanup.sql =================================================================== --- data/scripts/q_test_cleanup.sql (revision 1637277) +++ data/scripts/q_test_cleanup.sql (working copy) @@ -16,3 +16,10 @@ DROP TABLE IF EXISTS dest_g1; DROP TABLE IF EXISTS dest_g2; DROP TABLE IF EXISTS fetchtask_ioexception; + +DROP TABLE IF EXISTS cbo_t1; +DROP TABLE IF EXISTS cbo_t2; +DROP TABLE IF EXISTS cbo_t3; +DROP TABLE IF EXISTS src_cbo; +DROP TABLE IF EXISTS part; +DROP TABLE IF EXISTS lineitem; Index: data/scripts/q_test_init.sql =================================================================== --- data/scripts/q_test_init.sql (revision 1637277) +++ data/scripts/q_test_init.sql (working copy) @@ -243,3 +243,79 @@ STORED AS INPUTFORMAT 'org.apache.hadoop.mapred.SequenceFileInputFormat' OUTPUTFORMAT 'org.apache.hadoop.mapred.SequenceFileOutputFormat'; + + +-- +-- CBO tables +-- + +drop table if exists cbo_t1; +drop table if exists cbo_t2; +drop table if exists cbo_t3; +drop table if exists src_cbo; +drop table if exists part; +drop table if exists lineitem; + +set hive.cbo.enable=true; + +create table cbo_t1(key string, value string, c_int int, c_float float, c_boolean boolean) partitioned by (dt string) row format delimited fields terminated by ',' STORED AS TEXTFILE; +create table cbo_t2(key string, value string, c_int int, c_float float, c_boolean boolean) partitioned by (dt string) row format delimited fields terminated by ',' STORED AS TEXTFILE; +create table cbo_t3(key string, value string, c_int int, c_float float, c_boolean boolean) row format delimited fields terminated by ',' STORED AS TEXTFILE; + +load data local inpath '../../data/files/cbo_t1.txt' into table cbo_t1 partition (dt='2014'); +load data local inpath '../../data/files/cbo_t2.txt' into table cbo_t2 partition (dt='2014'); +load data local inpath '../../data/files/cbo_t3.txt' into table cbo_t3; + +CREATE TABLE part( + p_partkey INT, + p_name STRING, + p_mfgr STRING, + p_brand STRING, + p_type STRING, + p_size INT, + p_container STRING, + p_retailprice DOUBLE, + p_comment STRING +); + +LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part; + +CREATE TABLE lineitem (L_ORDERKEY INT, + L_PARTKEY INT, + L_SUPPKEY INT, + L_LINENUMBER INT, + L_QUANTITY DOUBLE, + L_EXTENDEDPRICE DOUBLE, + L_DISCOUNT DOUBLE, + L_TAX DOUBLE, + L_RETURNFLAG STRING, + L_LINESTATUS STRING, + l_shipdate STRING, + L_COMMITDATE STRING, + L_RECEIPTDATE STRING, + L_SHIPINSTRUCT STRING, + L_SHIPMODE STRING, + L_COMMENT STRING) +ROW FORMAT DELIMITED +FIELDS TERMINATED BY '|'; + +LOAD DATA LOCAL INPATH '../../data/files/lineitem.txt' OVERWRITE INTO TABLE 
lineitem; + +create table src_cbo as select * from src; + + +analyze table cbo_t1 partition (dt) compute statistics; +analyze table cbo_t1 compute statistics for columns key, value, c_int, c_float, c_boolean; +analyze table cbo_t2 partition (dt) compute statistics; +analyze table cbo_t2 compute statistics for columns key, value, c_int, c_float, c_boolean; +analyze table cbo_t3 compute statistics; +analyze table cbo_t3 compute statistics for columns key, value, c_int, c_float, c_boolean; +analyze table src_cbo compute statistics; +analyze table src_cbo compute statistics for columns; +analyze table part compute statistics; +analyze table part compute statistics for columns; +analyze table lineitem compute statistics; +analyze table lineitem compute statistics for columns; + +reset; +set hive.stats.dbclass=fs; Index: hcatalog/core/src/main/java/org/apache/hive/hcatalog/data/HCatRecordSerDe.java =================================================================== --- hcatalog/core/src/main/java/org/apache/hive/hcatalog/data/HCatRecordSerDe.java (revision 1637277) +++ hcatalog/core/src/main/java/org/apache/hive/hcatalog/data/HCatRecordSerDe.java (working copy) @@ -23,7 +23,7 @@ import java.util.List; import java.util.Map; import java.util.Properties; -import java.util.TreeMap; +import java.util.HashMap; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.serde.serdeConstants; @@ -212,7 +212,7 @@ private static Map serializeMap(Object f, MapObjectInspector moi) throws SerDeException { ObjectInspector koi = moi.getMapKeyObjectInspector(); ObjectInspector voi = moi.getMapValueObjectInspector(); - Map m = new TreeMap(); + Map m = new HashMap(); Map readMap = moi.getMap(f); if (readMap == null) { Index: hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/DynamicPartitionFileRecordWriterContainer.java =================================================================== --- hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/DynamicPartitionFileRecordWriterContainer.java (revision 1637277) +++ hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/DynamicPartitionFileRecordWriterContainer.java (working copy) @@ -27,11 +27,9 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.ql.metadata.HiveStorageHandler; import org.apache.hadoop.hive.serde2.SerDe; import org.apache.hadoop.hive.serde2.SerDeException; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; -import org.apache.hadoop.io.NullWritable; import org.apache.hadoop.io.Writable; import org.apache.hadoop.io.WritableComparable; import org.apache.hadoop.mapred.Reporter; @@ -44,14 +42,16 @@ import org.apache.hadoop.util.ReflectionUtils; import org.apache.hive.hcatalog.common.ErrorType; import org.apache.hive.hcatalog.common.HCatException; -import org.apache.hive.hcatalog.common.HCatUtil; import org.apache.hive.hcatalog.data.HCatRecord; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * Record writer container for tables using dynamic partitioning. See * {@link FileOutputFormatContainer} for more information */ class DynamicPartitionFileRecordWriterContainer extends FileRecordWriterContainer { + private static final Logger LOG = LoggerFactory.getLogger(DynamicPartitionFileRecordWriterContainer.class); private final List dynamicPartCols; private int maxDynamicPartitions; @@ -97,14 +97,36 @@ // TaskInputOutput. 
bwriter.close(reporter); } - for (Map.Entry entry : baseDynamicCommitters - .entrySet()) { - org.apache.hadoop.mapred.TaskAttemptContext currContext = dynamicContexts.get(entry.getKey()); - OutputCommitter baseOutputCommitter = entry.getValue(); - if (baseOutputCommitter.needsTaskCommit(currContext)) { - baseOutputCommitter.commitTask(currContext); + + TaskCommitContextRegistry.getInstance().register(context, new TaskCommitContextRegistry.TaskCommitterProxy() { + @Override + public void abortTask(TaskAttemptContext context) throws IOException { + for (Map.Entry outputJobInfoEntry : dynamicOutputJobInfo.entrySet()) { + String dynKey = outputJobInfoEntry.getKey(); + OutputJobInfo outputJobInfo = outputJobInfoEntry.getValue(); + LOG.info("Aborting task-attempt for " + outputJobInfo.getLocation()); + baseDynamicCommitters.get(dynKey) + .abortTask(dynamicContexts.get(dynKey)); + } } - } + + @Override + public void commitTask(TaskAttemptContext context) throws IOException { + for (Map.Entry outputJobInfoEntry : dynamicOutputJobInfo.entrySet()) { + String dynKey = outputJobInfoEntry.getKey(); + OutputJobInfo outputJobInfo = outputJobInfoEntry.getValue(); + LOG.info("Committing task-attempt for " + outputJobInfo.getLocation()); + TaskAttemptContext dynContext = dynamicContexts.get(dynKey); + OutputCommitter dynCommitter = baseDynamicCommitters.get(dynKey); + if (dynCommitter.needsTaskCommit(dynContext)) { + dynCommitter.commitTask(dynContext); + } + else { + LOG.info("Skipping commitTask() for " + outputJobInfo.getLocation()); + } + } + } + }); } @Override Index: hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputCommitterContainer.java =================================================================== --- hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputCommitterContainer.java (revision 1637277) +++ hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputCommitterContainer.java (working copy) @@ -118,6 +118,13 @@ public void abortTask(TaskAttemptContext context) throws IOException { if (!dynamicPartitioningUsed) { getBaseOutputCommitter().abortTask(HCatMapRedUtil.createTaskAttemptContext(context)); + } else { + try { + TaskCommitContextRegistry.getInstance().abortTask(context); + } + finally { + TaskCommitContextRegistry.getInstance().discardCleanupFor(context); + } } } @@ -127,6 +134,13 @@ //See HCATALOG-499 FileOutputFormatContainer.setWorkOutputPath(context); getBaseOutputCommitter().commitTask(HCatMapRedUtil.createTaskAttemptContext(context)); + } else { + try { + TaskCommitContextRegistry.getInstance().commitTask(context); + } + finally { + TaskCommitContextRegistry.getInstance().discardCleanupFor(context); + } } } @@ -136,7 +150,7 @@ return getBaseOutputCommitter().needsTaskCommit(HCatMapRedUtil.createTaskAttemptContext(context)); } else { // called explicitly through FileRecordWriterContainer.close() if dynamic - return false by default - return false; + return true; } } Index: hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/SpecialCases.java =================================================================== --- hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/SpecialCases.java (revision 1637277) +++ hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/SpecialCases.java (working copy) @@ -23,11 +23,20 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.ql.io.RCFileOutputFormat; +import 
org.apache.hadoop.hive.ql.io.avro.AvroContainerOutputFormat;
 import org.apache.hadoop.hive.ql.io.orc.OrcFile;
 import org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat;
+import org.apache.hadoop.hive.serde2.avro.AvroSerDe;
+import org.apache.hadoop.hive.serde2.avro.AvroSerdeUtils;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
 import org.apache.hadoop.mapred.OutputFormat;
+import org.apache.hive.hcatalog.data.schema.HCatFieldSchema;
+import java.util.ArrayList;
+import java.util.List;
 import java.util.Map;
+import java.util.Properties;
 /**
  * This class is a place to put all the code associated with
@@ -82,6 +91,29 @@
         jobProperties.put(propName,tableProps.get(propName));
       }
     }
+  } else if (ofclass == AvroContainerOutputFormat.class) {
+    // Special cases for Avro. As with ORC, we make table properties that
+    // Avro is interested in available in the jobconf at runtime.
+    Map<String, String> tableProps = jobInfo.getTableInfo().getTable().getParameters();
+    for (AvroSerdeUtils.AvroTableProperties property : AvroSerdeUtils.AvroTableProperties.values()) {
+      String propName = property.getPropName();
+      if (tableProps.containsKey(propName)){
+        jobProperties.put(propName, tableProps.get(propName));
+      }
+    }
+
+    Properties properties = new Properties();
+    properties.put("name", jobInfo.getTableName());
+
+    List<String> colNames = jobInfo.getOutputSchema().getFieldNames();
+    List<TypeInfo> colTypes = new ArrayList<TypeInfo>();
+    for (HCatFieldSchema field : jobInfo.getOutputSchema().getFields()){
+      colTypes.add(TypeInfoUtils.getTypeInfoFromTypeString(field.getTypeString()));
+    }
+
+    jobProperties.put(AvroSerdeUtils.AvroTableProperties.SCHEMA_LITERAL.getPropName(),
+        AvroSerDe.getSchemaFromCols(properties, colNames, colTypes, null).toString());
+  }
 }
Index: hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/TaskCommitContextRegistry.java
===================================================================
--- hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/TaskCommitContextRegistry.java	(revision 0)
+++ hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/TaskCommitContextRegistry.java	(working copy)
@@ -0,0 +1,148 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hive.hcatalog.mapreduce;
+
+import org.apache.commons.lang.StringUtils;
+import org.apache.hadoop.mapreduce.TaskAttemptContext;
+import org.apache.hive.hcatalog.common.HCatConstants;
+import org.apache.hive.hcatalog.common.HCatUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.HashMap;
+
+/**
+ * Singleton Registry to track the commit of TaskAttempts.
+ * Used to manage commits for Tasks that create dynamic-partitions.
+ */
+public class TaskCommitContextRegistry {
+
+  private static final Logger LOG = LoggerFactory.getLogger(TaskCommitContextRegistry.class);
+
+  private static TaskCommitContextRegistry ourInstance = new TaskCommitContextRegistry();
+
+  /**
+   * Singleton instance getter.
+   */
+  public static TaskCommitContextRegistry getInstance() {
+    return ourInstance;
+  }
+
+  /**
+   * Implement this interface to register call-backs for committing TaskAttempts.
+   */
+  public static interface TaskCommitterProxy {
+
+    /**
+     * Call-back for Committer's abortTask().
+     */
+    public void abortTask(TaskAttemptContext context) throws IOException;
+
+    /**
+     * Call-back for Committer's commitTask().
+     */
+    public void commitTask(TaskAttemptContext context) throws IOException;
+  }
+
+  private HashMap<String, TaskCommitterProxy> taskCommitters
+      = new HashMap<String, TaskCommitterProxy>();
+
+  /**
+   * Trigger commit for TaskAttempt, as specified by the TaskAttemptContext argument.
+   */
+  public synchronized void commitTask(TaskAttemptContext context) throws IOException {
+    String key = generateKey(context);
+    if (!taskCommitters.containsKey(key)) {
+      throw new IOException("No callback registered for TaskAttemptID:" + key);
+    }
+
+    try {
+      LOG.info("Committing TaskAttempt:" + key);
+      taskCommitters.get(key).commitTask(context);
+    }
+    catch (Throwable t) {
+      throw new IOException("Could not commit TaskAttemptID:" + key, t);
+    }
+
+  }
+
+  private String generateKey(TaskAttemptContext context) throws IOException {
+    String jobInfoString = context.getConfiguration().get(HCatConstants.HCAT_KEY_OUTPUT_INFO);
+    if (StringUtils.isBlank(jobInfoString)) { // Avoid the NPE.
+      throw new IOException("Could not retrieve OutputJobInfo for TaskAttempt " + context.getTaskAttemptID());
+    }
+    OutputJobInfo jobInfo = (OutputJobInfo) HCatUtil.deserialize(jobInfoString);
+    return context.getTaskAttemptID().toString() + "@" + jobInfo.getLocation();
+  }
+
+  /**
+   * Trigger abort for TaskAttempt, as specified by the TaskAttemptContext argument.
+   */
+  public synchronized void abortTask(TaskAttemptContext context) throws IOException {
+    String key = generateKey(context);
+    if (!taskCommitters.containsKey(key)) {
+      throw new IOException("No callback registered for TaskAttemptID:" + key);
+    }
+
+    try {
+      LOG.info("Aborting TaskAttempt:" + key);
+      taskCommitters.get(key).abortTask(context);
+    }
+    catch (Throwable t) {
+      throw new IOException("Could not abort TaskAttemptID:" + key, t);
+    }
+  }
+
+  /**
+   * Method to register call-backs to control commits and aborts of TaskAttempts.
+   * @param context The TaskAttemptContext instance for the task-attempt, identifying the output.
+   * @param committer Instance of TaskCommitterProxy, to commit/abort a TaskAttempt.
+   * @throws java.io.IOException On failure.
+ */ + public synchronized void register(TaskAttemptContext context, TaskCommitterProxy committer) throws IOException { + String key = generateKey(context); + LOG.info("Registering committer for TaskAttemptID:" + key); + if (taskCommitters.containsKey(key)) { + LOG.warn("Replacing previous committer:" + committer); + } + taskCommitters.put(key, committer); + } + + /** + * Method to discard the committer call-backs for a specified TaskAttemptID. + * @param context The TaskAttemptContext instance for the task-attempt, identifying the output. + * @throws java.io.IOException On failure. + */ + public synchronized void discardCleanupFor(TaskAttemptContext context) throws IOException { + String key = generateKey(context); + LOG.info("Discarding all cleanup for TaskAttemptID:" + key); + if (!taskCommitters.containsKey(key)) { + LOG.warn("No committer registered for TaskAttemptID:" + key); + } + else { + taskCommitters.remove(key); + } + } + + // Hide constructor, for make benefit glorious Singleton. + private TaskCommitContextRegistry() { + } +} Index: hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestUseDatabase.java =================================================================== --- hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestUseDatabase.java (revision 1637277) +++ hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestUseDatabase.java (working copy) @@ -64,7 +64,7 @@ String tmpDir = System.getProperty("test.tmp.dir"); File dir = new File(tmpDir + "/hive-junit-" + System.nanoTime()); - response = hcatDriver.run("alter table " + tblName + " add partition (b='2') location '" + dir.getAbsolutePath() + "'"); + response = hcatDriver.run("alter table " + tblName + " add partition (b='2') location '" + dir.toURI().getPath() + "'"); assertEquals(0, response.getResponseCode()); assertNull(response.getErrorMessage()); Index: hcatalog/hcatalog-pig-adapter/src/main/java/org/apache/hive/hcatalog/pig/PigHCatUtil.java =================================================================== --- hcatalog/hcatalog-pig-adapter/src/main/java/org/apache/hive/hcatalog/pig/PigHCatUtil.java (revision 1637277) +++ hcatalog/hcatalog-pig-adapter/src/main/java/org/apache/hive/hcatalog/pig/PigHCatUtil.java (working copy) @@ -480,7 +480,9 @@ Map result = new HashMap(); for (Entry entry : map.entrySet()) { // since map key for Pig has to be Strings - result.put(entry.getKey().toString(), extractPigObject(entry.getValue(), hfs.getMapValueSchema().get(0))); + if (entry.getKey()!=null) { + result.put(entry.getKey().toString(), extractPigObject(entry.getValue(), hfs.getMapValueSchema().get(0))); + } } return result; } Index: hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestHCatLoader.java =================================================================== --- hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestHCatLoader.java (revision 1637277) +++ hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestHCatLoader.java (working copy) @@ -101,12 +101,6 @@ private static final Map> DISABLED_STORAGE_FORMATS = new HashMap>() {{ - put(IOConstants.AVRO, new HashSet() {{ - add("testReadDataBasic"); - add("testReadPartitionedBasic"); - add("testProjectionsBasic"); - add("testSchemaLoadPrimitiveTypes"); - }}); put(IOConstants.PARQUETFILE, new HashSet() {{ add("testReadDataBasic"); add("testReadPartitionedBasic"); Index: hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestHCatLoaderComplexSchema.java 
===================================================================
--- hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestHCatLoaderComplexSchema.java	(revision 1637277)
+++ hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestHCatLoaderComplexSchema.java	(working copy)
@@ -18,8 +18,6 @@
  */
 package org.apache.hive.hcatalog.pig;
-import com.google.common.collect.ImmutableSet;
-
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -52,7 +50,6 @@
 import org.apache.pig.impl.logicalLayer.schema.Schema;
 import org.apache.pig.impl.logicalLayer.schema.Schema.FieldSchema;
-import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
 import org.junit.runner.RunWith;
@@ -76,14 +73,13 @@
   private static final Map<String, Set<String>> DISABLED_STORAGE_FORMATS = new HashMap<String, Set<String>>() {{
     put(IOConstants.AVRO, new HashSet<String>() {{
-      add("testSyntheticComplexSchema");
-      add("testTupleInBagInTupleInBag");
-      add("testMapWithComplexData");
+      add("testMapNullKey");
     }});
     put(IOConstants.PARQUETFILE, new HashSet<String>() {{
       add("testSyntheticComplexSchema");
       add("testTupleInBagInTupleInBag");
       add("testMapWithComplexData");
+      add("testMapNullKey");
     }});
   }};
@@ -223,6 +219,10 @@
   private void verifyWriteRead(String tablename, String pigSchema, String tableSchema, List<Tuple> data, boolean provideSchemaToStorer)
       throws IOException, CommandNeedRetryException, ExecException, FrontendException {
+    verifyWriteRead(tablename, pigSchema, tableSchema, data, data, provideSchemaToStorer);
+  }
+  private void verifyWriteRead(String tablename, String pigSchema, String tableSchema, List<Tuple> data, List<Tuple> result, boolean provideSchemaToStorer)
+      throws IOException, CommandNeedRetryException, ExecException, FrontendException {
     MockLoader.setData(tablename + "Input", data);
     try {
       createTable(tablename, tableSchema);
@@ -244,7 +244,7 @@
       Iterator<Tuple> it = server.openIterator("X");
       int i = 0;
       while (it.hasNext()) {
-        Tuple input = data.get(i++);
+        Tuple input = result.get(i++);
         Tuple output = it.next();
         compareTuples(input, output);
         LOG.info("tuple : {} ", output);
@@ -354,4 +354,40 @@
     verifyWriteRead("testMapWithComplexData", pigSchema, tableSchema, data, true);
     verifyWriteRead("testMapWithComplexData2", pigSchema, tableSchema, data, false);
   }
+
+  /**
+   * Test that a map with a null key can be written, and that the null-keyed entry is dropped on read.
+   * @throws Exception
+   */
+  @Test
+  public void testMapNullKey() throws Exception {
+    assumeTrue(!TestUtil.shouldSkip(storageFormat, DISABLED_STORAGE_FORMATS));
+    String pigSchema = "m:map[]";
+
+    String tableSchema = "m map<string, string>";
+
+    List<Tuple> data = new ArrayList<Tuple>();
+    Tuple t = t(
+        new HashMap<String, String>() {
+          {
+            put("ac test1", "test 1");
+            put("ac test2", "test 2");
+            put(null, "test 3");
+          };
+        });
+    data.add(t);
+
+    List<Tuple> result = new ArrayList<Tuple>();
+    t = t(
+        new HashMap<String, String>() {
+          {
+            put("ac test1", "test 1");
+            put("ac test2", "test 2");
+          };
+        });
+    result.add(t);
+
+    verifyWriteRead("testMapNullKey", pigSchema, tableSchema, data, result, true);
+    verifyWriteRead("testMapNullKey", pigSchema, tableSchema, data, result, false);
+  }
 }
Index: hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestHCatLoaderStorer.java
===================================================================
--- hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestHCatLoaderStorer.java	(revision 1637277)
+++ hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestHCatLoaderStorer.java	(working copy)
@@ -64,7 +64,7 @@
TestHCatLoader.executeStatementOnDriver("create external table " + tblName + " (my_small_int smallint, my_tiny_int tinyint)" + " row format delimited fields terminated by '\t' stored as textfile location '" + - dataDir + "'", driver); + dataDir.toURI().getPath() + "'", driver); TestHCatLoader.dropTable(tblName2, driver); TestHCatLoader.createTable(tblName2, "my_small_int smallint, my_tiny_int tinyint", null, driver, "textfile"); Index: hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestHCatStorer.java =================================================================== --- hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestHCatStorer.java (revision 1637277) +++ hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestHCatStorer.java (working copy) @@ -76,29 +76,16 @@ private static final Map> DISABLED_STORAGE_FORMATS = new HashMap>() {{ put(IOConstants.AVRO, new HashSet() {{ - add("testBagNStruct"); - add("testDateCharTypes"); - add("testDynamicPartitioningMultiPartColsInDataNoSpec"); - add("testDynamicPartitioningMultiPartColsInDataPartialSpec"); - add("testMultiPartColsInData"); - add("testPartColsInData"); - add("testStoreFuncAllSimpleTypes"); - add("testStoreFuncSimple"); - add("testStoreInPartiitonedTbl"); - add("testStoreMultiTables"); - add("testStoreWithNoCtorArgs"); - add("testStoreWithNoSchema"); - add("testWriteChar"); - add("testWriteDate"); - add("testWriteDate2"); - add("testWriteDate3"); - add("testWriteDecimal"); - add("testWriteDecimalX"); - add("testWriteDecimalXY"); - add("testWriteSmallint"); - add("testWriteTimestamp"); - add("testWriteTinyint"); - add("testWriteVarchar"); + add("testDateCharTypes"); // incorrect precision + // expected:<0 xxxxx yyy 5.2[]> but was:<0 xxxxx yyy 5.2[0]> + add("testWriteDecimalXY"); // incorrect precision + // expected:<1.2[]> but was:<1.2[0]> + add("testWriteSmallint"); // doesn't have a notion of small, and saves the full value as an int, so no overflow + // expected: but was:<32768> + add("testWriteTimestamp"); // does not support timestamp + // TypeInfoToSchema.createAvroPrimitive : UnsupportedOperationException + add("testWriteTinyint"); // doesn't have a notion of tiny, and saves the full value as an int, so no overflow + // expected: but was:<300> }}); put(IOConstants.PARQUETFILE, new HashSet() {{ add("testBagNStruct"); Index: hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestHCatStorerMulti.java =================================================================== --- hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestHCatStorerMulti.java (revision 1637277) +++ hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestHCatStorerMulti.java (working copy) @@ -70,11 +70,6 @@ private static final Map> DISABLED_STORAGE_FORMATS = new HashMap>() {{ - put(IOConstants.AVRO, new HashSet() {{ - add("testStoreBasicTable"); - add("testStorePartitionedTable"); - add("testStoreTableMulti"); - }}); put(IOConstants.PARQUETFILE, new HashSet() {{ add("testStoreBasicTable"); add("testStorePartitionedTable"); Index: hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/messaging/MessageFactory.java =================================================================== --- hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/messaging/MessageFactory.java (revision 1637277) +++ hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/messaging/MessageFactory.java (working copy) @@ 
-19,10 +19,13 @@ package org.apache.hive.hcatalog.messaging; +import org.apache.hadoop.hive.common.classification.InterfaceAudience; +import org.apache.hadoop.hive.common.classification.InterfaceStability; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.Partition; import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy; import org.apache.hadoop.util.ReflectionUtils; import org.apache.hive.hcatalog.messaging.json.JSONMessageFactory; @@ -131,6 +134,16 @@ public abstract AddPartitionMessage buildAddPartitionMessage(Table table, List partitions); /** + * Factory method for AddPartitionMessage. + * @param table The Table to which the partitions are added. + * @param partitionSpec The set of Partitions being added. + * @return AddPartitionMessage instance. + */ + @InterfaceAudience.LimitedPrivate({"Hive"}) + @InterfaceStability.Evolving + public abstract AddPartitionMessage buildAddPartitionMessage(Table table, PartitionSpecProxy partitionSpec); + + /** * Factory method for DropPartitionMessage. * @param table The Table from which the partition is dropped. * @param partition The Partition being dropped. Index: hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/messaging/json/JSONMessageFactory.java =================================================================== --- hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/messaging/json/JSONMessageFactory.java (revision 1637277) +++ hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/messaging/json/JSONMessageFactory.java (working copy) @@ -19,9 +19,12 @@ package org.apache.hive.hcatalog.messaging.json; +import org.apache.hadoop.hive.common.classification.InterfaceAudience; +import org.apache.hadoop.hive.common.classification.InterfaceStability; import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.Partition; import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy; import org.apache.hive.hcatalog.messaging.AddPartitionMessage; import org.apache.hive.hcatalog.messaging.CreateDatabaseMessage; import org.apache.hive.hcatalog.messaging.CreateTableMessage; @@ -87,6 +90,14 @@ } @Override + @InterfaceAudience.LimitedPrivate({"Hive"}) + @InterfaceStability.Evolving + public AddPartitionMessage buildAddPartitionMessage(Table table, PartitionSpecProxy partitionSpec) { + return new JSONAddPartitionMessage(HCAT_SERVER_URL, HCAT_SERVICE_PRINCIPAL, table.getDbName(), + table.getTableName(), getPartitionKeyValues(table, partitionSpec), System.currentTimeMillis()/1000); + } + + @Override public DropPartitionMessage buildDropPartitionMessage(Table table, Partition partition) { return new JSONDropPartitionMessage(HCAT_SERVER_URL, HCAT_SERVICE_PRINCIPAL, partition.getDbName(), partition.getTableName(), Arrays.asList(getPartitionKeyValues(table, partition)), @@ -107,4 +118,16 @@ partitionList.add(getPartitionKeyValues(table, partition)); return partitionList; } + + @InterfaceAudience.LimitedPrivate({"Hive"}) + @InterfaceStability.Evolving + private static List> getPartitionKeyValues(Table table, PartitionSpecProxy partitionSpec) { + List> partitionList = new ArrayList>(); + PartitionSpecProxy.PartitionIterator iterator = partitionSpec.getPartitionIterator(); + while (iterator.hasNext()) { + Partition partition = iterator.next(); + 
partitionList.add(getPartitionKeyValues(table, partition)); + } + return partitionList; + } } Index: hcatalog/webhcat/java-client/pom.xml =================================================================== --- hcatalog/webhcat/java-client/pom.xml (revision 1637277) +++ hcatalog/webhcat/java-client/pom.xml (working copy) @@ -47,6 +47,13 @@ + org.apache.hive + hive-exec + ${project.version} + test-jar + test + + org.apache.hive.hcatalog hive-hcatalog-core ${project.version} Index: hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatClient.java =================================================================== --- hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatClient.java (revision 1637277) +++ hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatClient.java (working copy) @@ -22,6 +22,8 @@ import java.util.Map; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.common.classification.InterfaceAudience; +import org.apache.hadoop.hive.common.classification.InterfaceStability; import org.apache.hadoop.hive.metastore.api.PartitionEventType; import org.apache.hadoop.hive.ql.exec.Utilities; import org.apache.hive.hcatalog.common.HCatException; @@ -213,6 +215,26 @@ public abstract List deserializePartitions(List hcatPartitionStringReps) throws HCatException; /** + * Serializer for HCatPartitionSpec. + * @param partitionSpec HCatPartitionSpec to be serialized. + * @return A list of Strings, representing the HCatPartitionSpec as a whole. + * @throws HCatException On failure to serialize. + */ + @InterfaceAudience.LimitedPrivate({"Hive"}) + @InterfaceStability.Evolving + public abstract List serializePartitionSpec(HCatPartitionSpec partitionSpec) throws HCatException; + + /** + * Deserializer for HCatPartitionSpec. + * @param hcatPartitionSpecStrings List of strings, representing the HCatPartitionSpec as a whole. + * @return HCatPartitionSpec, reconstructed from the list of strings. + * @throws HCatException On failure to deserialize. + */ + @InterfaceAudience.LimitedPrivate({"Hive"}) + @InterfaceStability.Evolving + public abstract HCatPartitionSpec deserializePartitionSpec(List hcatPartitionSpecStrings) throws HCatException; + + /** * Creates the table like an existing table. * * @param dbName The name of the database. @@ -280,6 +302,21 @@ throws HCatException; /** + * Gets partitions in terms of generic HCatPartitionSpec instances. + */ + @InterfaceAudience.LimitedPrivate({"Hive"}) + @InterfaceStability.Evolving + public abstract HCatPartitionSpec getPartitionSpecs(String dbName, String tableName, int maxPartitions) throws HCatException; + + /** + * Gets partitions in terms of generic HCatPartitionSpec instances. + */ + @InterfaceAudience.LimitedPrivate({"Hive"}) + @InterfaceStability.Evolving + public abstract HCatPartitionSpec getPartitionSpecs(String dbName, String tableName, Map partitionSelector, int maxPartitions) + throws HCatException; + + /** * Gets the partition. * * @param dbName The database name. @@ -312,6 +349,17 @@ throws HCatException; /** + * Adds partitions using HCatPartitionSpec. + * @param partitionSpec The HCatPartitionSpec representing the set of partitions added. + * @return The number of partitions added. + * @throws HCatException On failure to add partitions. 
+ */ + @InterfaceAudience.LimitedPrivate({"Hive"}) + @InterfaceStability.Evolving + public abstract int addPartitionSpec(HCatPartitionSpec partitionSpec) + throws HCatException; + + /** * Drops partition(s) that match the specified (and possibly partial) partition specification. * A partial partition-specification is one where not all partition-keys have associated values. For example, * for a table ('myDb.myTable') with 2 partition keys (dt string, region string), @@ -344,6 +392,14 @@ String filter) throws HCatException; /** + * List partitions by filter, but as HCatPartitionSpecs. + */ + @InterfaceAudience.LimitedPrivate({"Hive"}) + @InterfaceStability.Evolving + public abstract HCatPartitionSpec listPartitionSpecsByFilter(String dbName, String tblName, + String filter, int maxPartitions) throws HCatException; + + /** * Mark partition for event. * * @param dbName The database name. Index: hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatClientHMSImpl.java =================================================================== --- hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatClientHMSImpl.java (revision 1637277) +++ hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatClientHMSImpl.java (working copy) @@ -25,6 +25,8 @@ import org.apache.commons.lang.StringUtils; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.common.classification.InterfaceAudience; +import org.apache.hadoop.hive.common.classification.InterfaceStability; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.HiveMetaStoreClient; import org.apache.hadoop.hive.metastore.MetaStoreUtils; @@ -353,6 +355,31 @@ return listPartitionsByFilter(dbName, tblName, getFilterString(partitionSpec)); } + @Override + @InterfaceAudience.LimitedPrivate({"Hive"}) + @InterfaceStability.Evolving + public HCatPartitionSpec getPartitionSpecs(String dbName, String tableName, int maxPartitions) throws HCatException { + try { + return new HCatPartitionSpec(getTable(dbName, tableName), + hmsClient.listPartitionSpecs(dbName, tableName, maxPartitions)); + } + catch (NoSuchObjectException e) { + throw new ObjectNotFoundException( + "NoSuchObjectException while retrieving partition.", e); + } catch (MetaException e) { + throw new HCatException( + "MetaException while retrieving partition.", e); + } catch (TException e) { + throw new ConnectionFailureException( + "TException while retrieving partition.", e); + } + } + + @Override + public HCatPartitionSpec getPartitionSpecs(String dbName, String tableName, Map partitionSelector, int maxPartitions) throws HCatException { + return listPartitionSpecsByFilter(dbName, tableName, getFilterString(partitionSelector), maxPartitions); + } + private static String getFilterString(Map partitionSpec) { final String AND = " AND "; @@ -413,7 +440,7 @@ Table tbl = null; try { tbl = hmsClient.getTable(partInfo.getDatabaseName(), - partInfo.getTableName()); + partInfo.getTableName()); // TODO: Should be moved out. 
if (tbl.getPartitionKeysSize() == 0) { throw new HCatException("The table " + partInfo.getTableName() @@ -511,6 +538,28 @@ } @Override + @InterfaceAudience.LimitedPrivate({"Hive"}) + @InterfaceStability.Evolving + public HCatPartitionSpec listPartitionSpecsByFilter(String dbName, String tblName, String filter, int maxPartitions) + throws HCatException { + try { + return new HCatPartitionSpec(getTable(dbName, tblName), + hmsClient.listPartitionSpecsByFilter(dbName, tblName, filter, maxPartitions)); + } + catch(MetaException e) { + throw new HCatException("MetaException while fetching partitions.", e); + } + catch (NoSuchObjectException e) { + throw new ObjectNotFoundException( + "NoSuchObjectException while fetching partitions.", e); + } + catch (TException e) { + throw new ConnectionFailureException( + "TException while fetching partitions.", e); + } + } + + @Override public void markPartitionForEvent(String dbName, String tblName, Map partKVs, PartitionEventType eventType) throws HCatException { @@ -572,7 +621,7 @@ String token = null; try { token = hmsClient.getDelegationToken(owner, - renewerKerberosPrincipalName); + renewerKerberosPrincipalName); } catch (MetaException e) { throw new HCatException( "MetaException while getting delegation token.", e); @@ -750,6 +799,30 @@ } @Override + @InterfaceAudience.LimitedPrivate({"Hive"}) + @InterfaceStability.Evolving + public int addPartitionSpec(HCatPartitionSpec partitionSpec) throws HCatException { + + try { + return hmsClient.add_partitions_pspec(partitionSpec.toPartitionSpecProxy()); + } catch (InvalidObjectException e) { + throw new HCatException( + "InvalidObjectException while adding partition.", e); + } catch (AlreadyExistsException e) { + throw new HCatException( + "AlreadyExistsException while adding partition.", e); + } catch (MetaException e) { + throw new HCatException("MetaException while adding partition.", e); + } catch (NoSuchObjectException e) { + throw new ObjectNotFoundException("The table " + + "could not be found.", e); + } catch (TException e) { + throw new ConnectionFailureException( + "TException while adding partition.", e); + } + } + + @Override public String getMessageBusTopicName(String dbName, String tableName) throws HCatException { try { return hmsClient.getTable(dbName, tableName).getParameters().get(HCatConstants.HCAT_MSGBUS_TOPIC_NAME); @@ -824,4 +897,16 @@ } return partitions; } + + @Override + public List serializePartitionSpec(HCatPartitionSpec partitionSpec) throws HCatException { + return MetadataSerializer.get().serializePartitionSpec(partitionSpec); + } + + @Override + public HCatPartitionSpec deserializePartitionSpec(List hcatPartitionSpecStrings) throws HCatException { + HCatPartitionSpec hcatPartitionSpec = MetadataSerializer.get().deserializePartitionSpec(hcatPartitionSpecStrings); + hcatPartitionSpec.hcatTable(getTable(hcatPartitionSpec.getDbName(), hcatPartitionSpec.getTableName())); + return hcatPartitionSpec; + } } Index: hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatPartitionSpec.java =================================================================== --- hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatPartitionSpec.java (revision 0) +++ hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatPartitionSpec.java (working copy) @@ -0,0 +1,158 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.hive.hcatalog.api;
+
+import org.apache.hadoop.hive.common.classification.InterfaceAudience;
+import org.apache.hadoop.hive.common.classification.InterfaceStability;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
+import org.apache.hive.hcatalog.common.HCatException;
+
+/**
+ * Generalized representation of a set of HCatPartitions.
+ */
+
+@InterfaceAudience.LimitedPrivate({"Hive"})
+@InterfaceStability.Evolving
+public class HCatPartitionSpec {
+
+  protected HCatTable hcatTable;
+  protected PartitionSpecProxy partitionSpecProxy;
+
+  protected HCatPartitionSpec(HCatTable hcatTable, PartitionSpecProxy partitionSpecProxy) throws HCatException {
+    this.hcatTable = hcatTable;
+    this.partitionSpecProxy = partitionSpecProxy;
+    assert_invariant();
+  }
+
+  /**
+   * Getter for DBName of this HCatPartitionSpec.
+   * @return The name of the DB.
+   */
+  public String getDbName() {
+    return partitionSpecProxy.getDbName();
+  }
+
+  /**
+   * Getter for TableName of this HCatPartitionSpec.
+   * @return The name of the Table.
+   */
+  public String getTableName() {
+    return partitionSpecProxy.getTableName();
+  }
+
+  /**
+   * Setter for HCatTable. Required for deserialization.
+   */
+  void hcatTable(HCatTable hcatTable) throws HCatException {
+
+    assert this.hcatTable == null : "Expected hcatTable to be null at this point.";
+    this.hcatTable = hcatTable;
+    assert_invariant();
+
+  }
+
+  /**
+   * Conversion to a Hive Metastore API PartitionSpecProxy instance.
+   */
+  PartitionSpecProxy toPartitionSpecProxy() {
+    return partitionSpecProxy;
+  }
+
+  /**
+   * Getter for the number of HCatPartitions represented by this HCatPartitionSpec instance.
+   * @return The number of HCatPartitions.
+   * @throws HCatException On failure.
+   */
+  public int size() throws HCatException {
+    return partitionSpecProxy.size();
+  }
+
+  /**
+   * Setter for the "root" location of the HCatPartitionSpec.
+   * @param location The new "root" location of the HCatPartitionSpec.
+   * @throws HCatException On failure to set a new location.
+   */
+  public void setRootLocation(String location) throws HCatException {
+    try {
+      partitionSpecProxy.setRootLocation(location);
+    }
+    catch (MetaException metaException) {
+      throw new HCatException("Unable to set root-path!", metaException);
+    }
+  }
+
+  /**
+   * Getter for an Iterator to the first HCatPartition in the HCatPartitionSpec.
+   * @return HCatPartitionIterator to the first HCatPartition.
+   */
+  public HCatPartitionIterator getPartitionIterator() {
+    return new HCatPartitionIterator(hcatTable, partitionSpecProxy.getPartitionIterator());
+  }
+
+  // Assert class invariant.
+ private void assert_invariant() throws HCatException { + + if (hcatTable != null) { + + if (!hcatTable.getDbName().equalsIgnoreCase(partitionSpecProxy.getDbName())) { + String errorMessage = "Invalid HCatPartitionSpec instance: Table's DBName (" + hcatTable.getDbName() + ") " + + "doesn't match PartitionSpec (" + partitionSpecProxy.getDbName() + ")"; + assert false : errorMessage; + throw new HCatException(errorMessage); + } + + if (!hcatTable.getTableName().equalsIgnoreCase(partitionSpecProxy.getTableName())) { + String errorMessage = "Invalid HCatPartitionSpec instance: Table's TableName (" + hcatTable.getTableName() + ") " + + "doesn't match PartitionSpec (" + partitionSpecProxy.getTableName() + ")"; + assert false : errorMessage; + throw new HCatException(errorMessage); + } + } + } + + + /** + * Iterator over HCatPartitions in the HCatPartitionSpec. + */ + public static class HCatPartitionIterator { // implements java.util.Iterator { + + private HCatTable hcatTable; + private PartitionSpecProxy.PartitionIterator iterator; + + HCatPartitionIterator(HCatTable hcatTable, PartitionSpecProxy.PartitionIterator iterator) { + this.hcatTable = hcatTable; + this.iterator = iterator; + } + + public boolean hasNext() { + return iterator.hasNext(); + } + + public HCatPartition next() throws HCatException { + return new HCatPartition(hcatTable, iterator.next()); + } + + public void remove() { + iterator.remove(); + } + + } // class HCatPartitionIterator; + +} // class HCatPartitionSpec; Index: hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/MetadataJSONSerializer.java =================================================================== --- hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/MetadataJSONSerializer.java (revision 1637277) +++ hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/MetadataJSONSerializer.java (working copy) @@ -1,7 +1,11 @@ package org.apache.hive.hcatalog.api; +import org.apache.hadoop.hive.common.classification.InterfaceAudience; +import org.apache.hadoop.hive.common.classification.InterfaceStability; import org.apache.hadoop.hive.metastore.api.Partition; +import org.apache.hadoop.hive.metastore.api.PartitionSpec; import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy; import org.apache.hive.hcatalog.common.HCatException; import org.apache.thrift.TDeserializer; import org.apache.thrift.TException; @@ -10,6 +14,9 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import java.util.ArrayList; +import java.util.List; + /** * MetadataSerializer implementation, that serializes HCat API elements into JSON. 
*/
@@ -68,4 +75,38 @@
       throw new HCatException("Could not de-serialize HCatPartition.", exception);
     }
   }
+
+  @Override
+  @InterfaceAudience.LimitedPrivate({"Hive"})
+  @InterfaceStability.Evolving
+  public List<String> serializePartitionSpec(HCatPartitionSpec hcatPartitionSpec) throws HCatException {
+    try {
+      List<String> stringReps = new ArrayList<String>();
+      TSerializer serializer = new TSerializer(new TJSONProtocol.Factory());
+      for (PartitionSpec partitionSpec : hcatPartitionSpec.partitionSpecProxy.toPartitionSpec()) {
+        stringReps.add(serializer.toString(partitionSpec, "UTF-8"));
+      }
+      return stringReps;
+    }
+    catch (TException serializationException) {
+      throw new HCatException("Failed to serialize!", serializationException);
+    }
+  }
+
+  @Override
+  public HCatPartitionSpec deserializePartitionSpec(List<String> hcatPartitionSpecStrings) throws HCatException {
+    try {
+      List<PartitionSpec> partitionSpecList = new ArrayList<PartitionSpec>();
+      TDeserializer deserializer = new TDeserializer(new TJSONProtocol.Factory());
+      for (String stringRep : hcatPartitionSpecStrings) {
+        PartitionSpec partSpec = new PartitionSpec();
+        deserializer.deserialize(partSpec, stringRep, "UTF-8");
+        partitionSpecList.add(partSpec);
+      }
+      return new HCatPartitionSpec(null, PartitionSpecProxy.Factory.get(partitionSpecList));
+    }
+    catch (TException deserializationException) {
+      throw new HCatException("Failed to deserialize!", deserializationException);
+    }
+  }
 }
Index: hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/MetadataSerializer.java
===================================================================
--- hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/MetadataSerializer.java	(revision 1637277)
+++ hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/MetadataSerializer.java	(working copy)
@@ -1,7 +1,11 @@
 package org.apache.hive.hcatalog.api;
+import org.apache.hadoop.hive.common.classification.InterfaceAudience;
+import org.apache.hadoop.hive.common.classification.InterfaceStability;
 import org.apache.hive.hcatalog.common.HCatException;
+import java.util.List;
+
 /**
  * Interface to serialize HCat API elements.
  */
@@ -51,4 +55,24 @@
    */
   public abstract HCatPartition deserializePartition(String hcatPartitionStringRep) throws HCatException;
+  /**
+   * Serializer for HCatPartitionSpec.
+   * @param hcatPartitionSpec HCatPartitionSpec instance to be serialized.
+   * @return Serialized string-representations.
+   * @throws HCatException On failure to serialize.
+   */
+  @InterfaceAudience.LimitedPrivate({"Hive"})
+  @InterfaceStability.Evolving
+  public abstract List<String> serializePartitionSpec(HCatPartitionSpec hcatPartitionSpec) throws HCatException;
+
+  /**
+   * Deserializer for HCatPartitionSpec string-representations.
+   * @param hcatPartitionSpecStrings List of strings to be converted into an HCatPartitionSpec.
+   * @return Deserialized HCatPartitionSpec instance.
+   * @throws HCatException On failure to deserialize (e.g., an incompatible serialization format).
+ */ + @InterfaceAudience.LimitedPrivate({"Hive"}) + @InterfaceStability.Evolving + public abstract HCatPartitionSpec deserializePartitionSpec(List<String> hcatPartitionSpecStrings) throws HCatException; + } Index: hcatalog/webhcat/java-client/src/test/java/org/apache/hive/hcatalog/api/TestHCatClient.java =================================================================== --- hcatalog/webhcat/java-client/src/test/java/org/apache/hive/hcatalog/api/TestHCatClient.java (revision 1637277) +++ hcatalog/webhcat/java-client/src/test/java/org/apache/hive/hcatalog/api/TestHCatClient.java (working copy) @@ -31,6 +31,7 @@ import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.HiveMetaStore; import org.apache.hadoop.hive.metastore.api.PartitionEventType; +import org.apache.hadoop.hive.ql.WindowsPathUtil; import org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat; import org.apache.hadoop.hive.ql.io.RCFileInputFormat; import org.apache.hadoop.hive.ql.io.RCFileOutputFormat; @@ -107,13 +108,17 @@ @BeforeClass public static void startMetaStoreServer() throws Exception { + hcatConf = new HiveConf(TestHCatClient.class); + if (Shell.WINDOWS) { + WindowsPathUtil.convertPathsFromWindowsToHdfs(hcatConf); + } + Thread t = new Thread(new RunMS(msPort)); t.start(); Thread.sleep(10000); securityManager = System.getSecurityManager(); System.setSecurityManager(new NoExitSecurityManager()); - hcatConf = new HiveConf(TestHCatClient.class); hcatConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + msPort); hcatConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3); @@ -970,7 +975,7 @@ sourceMetaStore.addPartition(HCatAddPartitionDesc.create(sourcePartition_2).build()); // The source table now has 2 partitions, one in TEXTFILE, the other in ORC. - // Test that adding these partitions to the target-table *without* replicating the table-change. + // Test adding these partitions to the target-table *without* replicating the table-change. List<HCatPartition> sourcePartitions = sourceMetaStore.getPartitions(dbName, tableName); assertEquals("Unexpected number of source partitions.", 2, sourcePartitions.size()); @@ -1002,4 +1007,139 @@ assertTrue("Unexpected exception! " + unexpected.getMessage(), false); } } + + /** + * Test that partition-definitions can be replicated between HCat-instances, + * independently of table-metadata replication, using PartitionSpec interfaces. + * (This is essentially the same test as testPartitionRegistrationWithCustomSchema(), + * transliterated to use the PartitionSpec APIs.) + * 2 identical tables are created on 2 different HCat instances ("source" and "target"). + * On the source instance, + * 1. One partition is added with the old format ("TEXTFILE"). + * 2. The table is updated with an additional column and the data-format changed to ORC. + * 3. Another partition is added with the new format. + * 4. The partitions' metadata is copied to the target HCat instance, without updating the target table definition. + * 5. The partitions' metadata is tested to be an exact replica of that on the source.
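+ * (Step 4 is the serializePartitionSpec()/deserializePartitionSpec() round-trip exercised below.)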
+ * @throws Exception + */ + @Test + public void testPartitionSpecRegistrationWithCustomSchema() throws Exception { + try { + startReplicationTargetMetaStoreIfRequired(); + + HCatClient sourceMetaStore = HCatClient.create(new Configuration(hcatConf)); + final String dbName = "myDb"; + final String tableName = "myTable"; + + sourceMetaStore.dropDatabase(dbName, true, HCatClient.DropDBMode.CASCADE); + + sourceMetaStore.createDatabase(HCatCreateDBDesc.create(dbName).build()); + List<HCatFieldSchema> columnSchema = new ArrayList<HCatFieldSchema>( + Arrays.asList(new HCatFieldSchema("foo", Type.INT, ""), + new HCatFieldSchema("bar", Type.STRING, ""))); + + List<HCatFieldSchema> partitionSchema = Arrays.asList(new HCatFieldSchema("dt", Type.STRING, ""), + new HCatFieldSchema("grid", Type.STRING, "")); + + HCatTable sourceTable = new HCatTable(dbName, tableName).cols(columnSchema) + .partCols(partitionSchema) + .comment("Source table."); + + sourceMetaStore.createTable(HCatCreateTableDesc.create(sourceTable).build()); + + // Verify that the sourceTable was created successfully. + sourceTable = sourceMetaStore.getTable(dbName, tableName); + assertNotNull("Table couldn't be queried for. ", sourceTable); + + // Partitions added now should inherit table-schema, properties, etc. + Map<String, String> partitionSpec_1 = new HashMap<String, String>(); + partitionSpec_1.put("grid", "AB"); + partitionSpec_1.put("dt", "2011_12_31"); + HCatPartition sourcePartition_1 = new HCatPartition(sourceTable, partitionSpec_1, ""); + + sourceMetaStore.addPartition(HCatAddPartitionDesc.create(sourcePartition_1).build()); + assertEquals("Unexpected number of partitions. ", + sourceMetaStore.getPartitions(dbName, tableName).size(), 1); + // Verify that partition_1 was added correctly, and properties were inherited from the HCatTable. + HCatPartition addedPartition_1 = sourceMetaStore.getPartition(dbName, tableName, partitionSpec_1); + assertEquals("Column schema doesn't match.", addedPartition_1.getColumns(), sourceTable.getCols()); + assertEquals("InputFormat doesn't match.", addedPartition_1.getInputFormat(), sourceTable.getInputFileFormat()); + assertEquals("OutputFormat doesn't match.", addedPartition_1.getOutputFormat(), sourceTable.getOutputFileFormat()); + assertEquals("SerDe doesn't match.", addedPartition_1.getSerDe(), sourceTable.getSerdeLib()); + assertEquals("SerDe params don't match.", addedPartition_1.getSerdeParams(), sourceTable.getSerdeParams()); + + // Replicate table definition. + + HCatClient targetMetaStore = HCatClient.create(new Configuration(replicationTargetHCatConf)); + targetMetaStore.dropDatabase(dbName, true, HCatClient.DropDBMode.CASCADE); + + targetMetaStore.createDatabase(HCatCreateDBDesc.create(dbName).build()); + // Make a copy of the source-table, as would be done across class-loaders. + HCatTable targetTable = targetMetaStore.deserializeTable(sourceMetaStore.serializeTable(sourceTable)); + targetMetaStore.createTable(HCatCreateTableDesc.create(targetTable).build()); + targetTable = targetMetaStore.getTable(dbName, tableName); + + assertEquals("Created table doesn't match the source.", + targetTable.diff(sourceTable), HCatTable.NO_DIFF); + + // Modify Table schema at the source. + List<HCatFieldSchema> newColumnSchema = new ArrayList<HCatFieldSchema>(columnSchema); + newColumnSchema.add(new HCatFieldSchema("goo_new", Type.DOUBLE, "")); + Map<String, String> tableParams = new HashMap<String, String>(1); + tableParams.put("orc.compress", "ZLIB"); + sourceTable.cols(newColumnSchema) // Add a column. + .fileFormat("orcfile") // Change SerDe, File I/O formats.
+ .tblProps(tableParams) + .serdeParam(serdeConstants.FIELD_DELIM, Character.toString('\001')); + sourceMetaStore.updateTableSchema(dbName, tableName, sourceTable); + sourceTable = sourceMetaStore.getTable(dbName, tableName); + + // Add another partition to the source. + Map<String, String> partitionSpec_2 = new HashMap<String, String>(); + partitionSpec_2.put("grid", "AB"); + partitionSpec_2.put("dt", "2012_01_01"); + HCatPartition sourcePartition_2 = new HCatPartition(sourceTable, partitionSpec_2, ""); + sourceMetaStore.addPartition(HCatAddPartitionDesc.create(sourcePartition_2).build()); + + // The source table now has 2 partitions, one in TEXTFILE, the other in ORC. + // Test adding these partitions to the target-table *without* replicating the table-change. + + HCatPartitionSpec sourcePartitionSpec = sourceMetaStore.getPartitionSpecs(dbName, tableName, -1); + assertEquals("Unexpected number of source partitions.", 2, sourcePartitionSpec.size()); + + // Serialize the hcatPartitionSpec. + List<String> partitionSpecString = sourceMetaStore.serializePartitionSpec(sourcePartitionSpec); + + // Deserialize the HCatPartitionSpec using the target HCatClient instance. + HCatPartitionSpec targetPartitionSpec = targetMetaStore.deserializePartitionSpec(partitionSpecString); + assertEquals("Could not add the expected number of partitions.", + sourcePartitionSpec.size(), targetMetaStore.addPartitionSpec(targetPartitionSpec)); + + // Retrieve partitions. + targetPartitionSpec = targetMetaStore.getPartitionSpecs(dbName, tableName, -1); + assertEquals("Could not retrieve the expected number of partitions.", + sourcePartitionSpec.size(), targetPartitionSpec.size()); + + // Assert that the source and target partitions are equivalent. + HCatPartitionSpec.HCatPartitionIterator sourceIterator = sourcePartitionSpec.getPartitionIterator(); + HCatPartitionSpec.HCatPartitionIterator targetIterator = targetPartitionSpec.getPartitionIterator(); + + while (targetIterator.hasNext()) { + assertTrue("Fewer target partitions than source.", sourceIterator.hasNext()); + HCatPartition sourcePartition = sourceIterator.next(); + HCatPartition targetPartition = targetIterator.next(); + assertEquals("Column schema doesn't match.", sourcePartition.getColumns(), targetPartition.getColumns()); + assertEquals("InputFormat doesn't match.", sourcePartition.getInputFormat(), targetPartition.getInputFormat()); + assertEquals("OutputFormat doesn't match.", sourcePartition.getOutputFormat(), targetPartition.getOutputFormat()); + assertEquals("SerDe doesn't match.", sourcePartition.getSerDe(), targetPartition.getSerDe()); + assertEquals("SerDe params don't match.", sourcePartition.getSerdeParams(), targetPartition.getSerdeParams()); + + } + } + catch (Exception unexpected) { + LOG.error("Unexpected exception! ", unexpected); + assertTrue("Unexpected exception! " + unexpected.getMessage(), false); + } + } + } Index: hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/ExecServiceImpl.java =================================================================== --- hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/ExecServiceImpl.java (revision 1637277) +++ hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/ExecServiceImpl.java (working copy) @@ -229,15 +229,17 @@ watchdog.checkException(); } catch (Exception ex) { - LOG.error("Command: " + cmd + " failed:", ex); + LOG.error("Command: " + cmd + " failed. res=" + res, ex); } if(watchdog.killedProcess()) { String msg = " was terminated due to timeout(" + timeout + "ms).
See " + AppConfig .EXEC_TIMEOUT_NAME + " property"; - LOG.warn("Command: " + cmd + msg); + LOG.warn("Command: " + cmd + msg + " res=" + res); res.stderr += " Command " + msg; } - + if(res.exitcode != 0) { + LOG.info("Command: " + cmd + " failed. res=" + res); + } return res; } Index: hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/SecureProxySupport.java =================================================================== --- hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/SecureProxySupport.java (revision 1637277) +++ hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/SecureProxySupport.java (working copy) @@ -40,7 +40,7 @@ /** * Helper class to run jobs using Kerberos security. Always safe to - * use these methods, it's a noop if security is not enabled. + * use these methods, it's a no-op if security is not enabled. */ public class SecureProxySupport { private Path tokenPath; @@ -140,6 +140,7 @@ ugi.doAs(new PrivilegedExceptionAction() { public Object run() throws IOException { FileSystem fs = FileSystem.get(conf); + //todo: according to JavaDoc this seems like private API: addDelegationToken should be used twrapper.token = fs.getDelegationToken(ugi.getShortUserName()); return null; } Index: hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/SqoopDelegator.java =================================================================== --- hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/SqoopDelegator.java (revision 1637277) +++ hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/SqoopDelegator.java (working copy) @@ -83,6 +83,20 @@ args.add("-D" + TempletonControllerJob.TOKEN_FILE_ARG_PLACEHOLDER); args.add("-D" + TempletonControllerJob.MAPREDUCE_JOB_TAGS_ARG_PLACEHOLDER); } + if(i == 0 && TempletonUtils.isset(libdir) && TempletonUtils.isset(appConf.sqoopArchive())) { + //http://sqoop.apache.org/docs/1.4.5/SqoopUserGuide.html#_using_generic_and_specific_arguments + String libJars = null; + for(String s : args) { + if(s.startsWith(JobSubmissionConstants.Sqoop.LIB_JARS)) { + libJars = s.substring(s.indexOf("=") + 1); + break; + } + } + //the jars in libJars will be localized to CWD of the launcher task; then -libjars will + //cause them to be localized for the Sqoop MR job tasks + args.add(TempletonUtils.quoteForWindows("-libjars")); + args.add(TempletonUtils.quoteForWindows(libJars)); + } } } else if (TempletonUtils.isset(optionsFile)) { args.add("--options-file"); @@ -114,11 +128,13 @@ /**Sqoop accesses databases via JDBC. This means it needs to have appropriate JDBC drivers available. Normally, the user would install Sqoop and place these jars into SQOOP_HOME/lib. When WebHCat is configured to auto-ship the Sqoop tar file, we - need to make sure that relevant JDBC jars are available on target node. + need to make sure that relevant JDBC jars are available on target node but we cannot modify + lib/ of exploded tar because Dist Cache intentionally prevents this. The user is expected to place any JDBC jars into an HDFS directory and specify this - dir in "libdir" parameter. All the files in this dir will be copied to lib/ of the - exploded Sqoop tar ball on target node. + dir in "libdir" parameter. WebHCat then ensures that these jars are localized for the launcher task + and made available to Sqoop. 
{@link org.apache.hive.hcatalog.templeton.tool.LaunchMapper#handleSqoop(org.apache.hadoop.conf.Configuration, java.util.Map)} + {@link #makeArgs(String, String, String, String, String, boolean, String)} */ LOG.debug("libdir=" + libdir); List jarList = TempletonUtils.hadoopFsListChildren(libdir, appConf, runAs); Index: hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/tool/LaunchMapper.java =================================================================== --- hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/tool/LaunchMapper.java (revision 1637277) +++ hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/tool/LaunchMapper.java (working copy) @@ -18,7 +18,6 @@ */ package org.apache.hive.hcatalog.templeton.tool; -import com.google.common.io.Files; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; @@ -101,12 +100,19 @@ if(TempletonUtils.isset(conf.get(Sqoop.LIB_JARS))) { //LIB_JARS should only be set if Sqoop is auto-shipped LOG.debug(Sqoop.LIB_JARS + "=" + conf.get(Sqoop.LIB_JARS)); - //copy these (which have now been localized) jars to sqoop/lib - String destDir = conf.get(AppConfig.SQOOP_HOME_PATH) + File.separator + "lib"; String[] files = conf.getStrings(Sqoop.LIB_JARS); + StringBuilder jdbcJars = new StringBuilder(); for(String f : files) { - Files.copy(new File(f), new File(destDir + File.separator + f)); + jdbcJars.append(f).append(File.pathSeparator); } + jdbcJars.setLength(jdbcJars.length() - 1); + //this makes the jars available to Sqoop client + if(TempletonUtils.isset(System.getenv("HADOOP_CLASSPATH"))) { + env.put("HADOOP_CLASSPATH", System.getenv("HADOOP_CLASSPATH") + File.pathSeparator + jdbcJars.toString()); + } + else { + env.put("HADOOP_CLASSPATH", jdbcJars.toString()); + } } } protected Process startJob(Context context, String user, String overrideClasspath) Index: itests/hive-minikdc/src/test/java/org/apache/hive/minikdc/TestJdbcWithMiniKdc.java =================================================================== --- itests/hive-minikdc/src/test/java/org/apache/hive/minikdc/TestJdbcWithMiniKdc.java (revision 1637277) +++ itests/hive-minikdc/src/test/java/org/apache/hive/minikdc/TestJdbcWithMiniKdc.java (working copy) @@ -171,8 +171,8 @@ MiniHiveKdc.HIVE_TEST_USER_2); } catch (SQLException e) { // Expected error - assertTrue(e.getMessage().contains("Failed to validate proxy privilege")); - assertTrue(e.getCause().getCause().getMessage().contains("Failed to validate proxy privilege")); + assertTrue(e.getMessage().contains("Error retrieving delegation token for user")); + assertTrue(e.getCause().getCause().getMessage().contains("is not allowed to impersonate")); } finally { hs2Conn.close(); } Index: itests/hive-unit/src/main/java/org/apache/hive/jdbc/miniHS2/AbstractHiveService.java =================================================================== --- itests/hive-unit/src/main/java/org/apache/hive/jdbc/miniHS2/AbstractHiveService.java (revision 1637277) +++ itests/hive-unit/src/main/java/org/apache/hive/jdbc/miniHS2/AbstractHiveService.java (working copy) @@ -106,7 +106,7 @@ } // get service host - protected String getHost() { + public String getHost() { return hostname; } @@ -127,12 +127,12 @@ } // Get binary service port # - protected int getBinaryPort() { + public int getBinaryPort() { return binaryPort; } // Get http service port # - protected int getHttpPort() { + public int getHttpPort() { return httpPort; } Index: 
itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestAuthorizationApiAuthorizer.java =================================================================== --- itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestAuthorizationApiAuthorizer.java (revision 1637277) +++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestAuthorizationApiAuthorizer.java (working copy) @@ -156,7 +156,7 @@ FunctionInvoker invoker = new FunctionInvoker() { @Override public void invoke() throws Exception { - msc.create_role(new Role()); + msc.create_role(new Role("role1", 0, "owner")); } }; testFunction(invoker); Index: itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/StorageBasedMetastoreTestBase.java =================================================================== --- itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/StorageBasedMetastoreTestBase.java (revision 1637277) +++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/StorageBasedMetastoreTestBase.java (working copy) @@ -34,8 +34,10 @@ import org.apache.hadoop.hive.ql.security.authorization.AuthorizationPreEventListener; import org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider; import org.apache.hadoop.hive.ql.session.SessionState; +import org.apache.hadoop.hive.ql.WindowsPathUtil; import org.apache.hadoop.hive.shims.ShimLoader; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.util.Shell; import org.junit.After; import org.junit.Assert; import org.junit.Before; @@ -55,7 +57,11 @@ } protected HiveConf createHiveConf() throws Exception { - return new HiveConf(this.getClass()); + HiveConf conf = new HiveConf(this.getClass()); + if (Shell.WINDOWS) { + WindowsPathUtil.convertPathsFromWindowsToHdfs(conf); + } + return conf; } @Before @@ -71,9 +77,8 @@ System.setProperty(HiveConf.ConfVars.HIVE_METASTORE_AUTHENTICATOR_MANAGER.varname, InjectableDummyAuthenticator.class.getName()); - MetaStoreUtils.startMetaStore(port, ShimLoader.getHadoopThriftAuthBridge()); - clientHiveConf = createHiveConf(); + MetaStoreUtils.startMetaStore(port, ShimLoader.getHadoopThriftAuthBridge(), clientHiveConf); // Turn off client-side authorization clientHiveConf.setBoolVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_ENABLED,false); Index: itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestStorageBasedMetastoreAuthorizationDrops.java =================================================================== --- itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestStorageBasedMetastoreAuthorizationDrops.java (revision 1637277) +++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestStorageBasedMetastoreAuthorizationDrops.java (working copy) @@ -18,10 +18,15 @@ package org.apache.hadoop.hive.ql.security; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.Table; import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse; +import org.apache.hadoop.hive.shims.ShimLoader; +import org.apache.hadoop.hive.shims.HadoopShims.MiniDFSShim; import org.junit.Assert; import org.junit.Test; @@ -30,6 +35,41 @@ */ public class TestStorageBasedMetastoreAuthorizationDrops extends StorageBasedMetastoreTestBase { + protected static MiniDFSShim dfs = null; + + @Override + protected 
HiveConf createHiveConf() throws Exception { + // Hadoop FS ACLs do not work with LocalFileSystem, so set up MiniDFS. + HiveConf conf = super.createHiveConf(); + + String currentUserName = ShimLoader.getHadoopShims().getUGIForConf(conf).getShortUserName(); + conf.set("hadoop.proxyuser." + currentUserName + ".groups", "*"); + conf.set("hadoop.proxyuser." + currentUserName + ".hosts", "*"); + dfs = ShimLoader.getHadoopShims().getMiniDfs(conf, 4, true, null); + FileSystem fs = dfs.getFileSystem(); + + Path warehouseDir = new Path(new Path(fs.getUri()), "/warehouse"); + fs.mkdirs(warehouseDir); + conf.setVar(HiveConf.ConfVars.METASTOREWAREHOUSE, warehouseDir.toString()); + conf.setBoolVar(HiveConf.ConfVars.HIVE_WAREHOUSE_SUBDIR_INHERIT_PERMS, true); + + // Set up scratch directory + Path scratchDir = new Path(new Path(fs.getUri()), "/scratchdir"); + conf.setVar(HiveConf.ConfVars.SCRATCHDIR, scratchDir.toString()); + + return conf; + } + + @Override + public void tearDown() throws Exception { + super.tearDown(); + + if (dfs != null) { + dfs.shutdown(); + dfs = null; + } + } + @Test public void testDropDatabase() throws Exception { dropDatabaseByOtherUser("-rwxrwxrwx", 0); Index: itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java =================================================================== --- itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java (revision 1637277) +++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java (working copy) @@ -140,7 +140,7 @@ executeStatementOnDriver("CREATE EXTERNAL TABLE " + tblNameStg + "(a INT, b STRING)" + " ROW FORMAT DELIMITED FIELDS TERMINATED BY '\\t' LINES TERMINATED BY '\\n'" + " STORED AS TEXTFILE" + - " LOCATION '" + stagingFolder.newFolder() + "'", driver); + " LOCATION '" + stagingFolder.newFolder().toURI().getPath() + "'", driver); executeStatementOnDriver("load data local inpath '" + BASIC_FILE_NAME + "' overwrite into table " + tblNameStg, driver); Index: itests/hive-unit/src/test/java/org/apache/hadoop/hive/thrift/TestZooKeeperTokenStore.java =================================================================== --- itests/hive-unit/src/test/java/org/apache/hadoop/hive/thrift/TestZooKeeperTokenStore.java (revision 1637277) +++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/thrift/TestZooKeeperTokenStore.java (working copy) @@ -43,9 +43,6 @@ private CuratorFramework zkClient = null; private int zkPort = -1; private ZooKeeperTokenStore ts; - // connect timeout large enough for slower test environments - private final int connectTimeoutMillis = 30000; - private final int sessionTimeoutMillis = 3000; @Override protected void setUp() throws Exception { @@ -55,10 +52,9 @@ } this.zkCluster = new MiniZooKeeperCluster(); this.zkPort = this.zkCluster.startup(zkDataDir); - - this.zkClient = CuratorFrameworkFactory.builder().connectString("localhost:" + zkPort) - .sessionTimeoutMs(sessionTimeoutMillis).connectionTimeoutMs(connectTimeoutMillis) - .retryPolicy(new ExponentialBackoffRetry(1000, 3)).build(); + this.zkClient = + CuratorFrameworkFactory.builder().connectString("localhost:" + zkPort) + .retryPolicy(new ExponentialBackoffRetry(1000, 3)).build(); this.zkClient.start(); } @@ -74,15 +70,9 @@ private Configuration createConf(String zkPath) { Configuration conf = new Configuration(); - conf.set( - HadoopThriftAuthBridge20S.Server.DELEGATION_TOKEN_STORE_ZK_CONNECT_STR, - "localhost:" + this.zkPort); - conf.set( - 
HadoopThriftAuthBridge20S.Server.DELEGATION_TOKEN_STORE_ZK_ZNODE, - zkPath); - conf.setLong( - HadoopThriftAuthBridge20S.Server.DELEGATION_TOKEN_STORE_ZK_CONNECT_TIMEOUTMILLIS, - connectTimeoutMillis); + conf.set(HadoopThriftAuthBridge20S.Server.DELEGATION_TOKEN_STORE_ZK_CONNECT_STR, "localhost:" + + this.zkPort); + conf.set(HadoopThriftAuthBridge20S.Server.DELEGATION_TOKEN_STORE_ZK_ZNODE, zkPath); return conf; } Index: itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java =================================================================== --- itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java (revision 1637277) +++ itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java (working copy) @@ -1805,6 +1805,7 @@ ResultSet rs = stmt.executeQuery("SELECT 1 AS a, 2 AS a from " + tableName); assertTrue(rs.next()); assertEquals(1, rs.getInt("a")); + rs.close(); } Index: itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestSSL.java =================================================================== --- itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestSSL.java (revision 1637277) +++ itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestSSL.java (working copy) @@ -31,15 +31,21 @@ import java.util.Map; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.util.Shell; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hive.jdbc.miniHS2.MiniHS2; import org.junit.After; +import org.junit.Assert; +import org.junit.Assume; import org.junit.Before; import org.junit.BeforeClass; import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; public class TestSSL { + private static final Logger LOG = LoggerFactory.getLogger(TestSSL.class); private static final String KEY_STORE_NAME = "keystore.jks"; private static final String TRUST_STORE_NAME = "truststore.jks"; private static final String KEY_STORE_PASSWORD = "HiveJdbc"; @@ -87,7 +93,74 @@ System.clearProperty(JAVA_TRUST_STORE_PASS_PROP); } + private int execCommand(String cmd) throws Exception { + int exitCode; + try { + String output = Shell.execCommand("bash", "-c", cmd); + LOG.info("Output from '" + cmd + "': " + output); + exitCode = 0; + } catch (Shell.ExitCodeException e) { + exitCode = e.getExitCode(); + LOG.info("Error executing '" + cmd + "', exitCode = " + exitCode, e); + } + return exitCode; + } + /*** + * Tests to ensure SSLv2 and SSLv3 are disabled + */ + @Test + public void testSSLVersion() throws Exception { + Assume.assumeTrue(execCommand("which openssl") == 0); // we need openssl + Assume.assumeTrue(System.getProperty("os.name").toLowerCase() + .contains("linux")); // we depend on linux openssl exit codes + + setSslConfOverlay(confOverlay); + // Test in binary mode + setBinaryConfOverlay(confOverlay); + // Start HS2 with SSL + miniHS2.start(confOverlay); + + // make SSL connection + hs2Conn = DriverManager.getConnection(miniHS2.getJdbcURL() + ";ssl=true;sslTrustStore=" + + dataFileDir + File.separator + TRUST_STORE_NAME + ";trustStorePassword=" + + KEY_STORE_PASSWORD, System.getProperty("user.name"), "bar"); + hs2Conn.close(); + Assert.assertEquals("Expected exit code of 1", 1, + execCommand("openssl s_client -connect " + miniHS2.getHost() + ":" + miniHS2.getBinaryPort() + + " -ssl2 < /dev/null")); + Assert.assertEquals("Expected exit code of 1", 1, + execCommand("openssl s_client -connect " + miniHS2.getHost() + ":" + miniHS2.getBinaryPort() + + " -ssl3 < /dev/null"));
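+ // The exit-code assertions above rely on Linux openssl behaviour: a refused SSLv2/SSLv3 + // handshake makes "openssl s_client" exit with 1, which is why this test is skipped + // (via the Assume checks) when openssl is unavailable or the OS is not Linux.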
miniHS2.stop(); + + // Test in http mode + setHttpConfOverlay(confOverlay); + miniHS2.start(confOverlay); + // make SSL connection + try { + hs2Conn = DriverManager.getConnection(miniHS2.getJdbcURL() + + ";ssl=true;sslTrustStore=" + dataFileDir + File.separator + + TRUST_STORE_NAME + ";trustStorePassword=" + KEY_STORE_PASSWORD + + "?hive.server2.transport.mode=" + HS2_HTTP_MODE + + ";hive.server2.thrift.http.path=" + HS2_HTTP_ENDPOINT, + System.getProperty("user.name"), "bar"); + Assert.fail("Expected SQLException during connect"); + } catch (SQLException e) { + LOG.info("Expected exception: " + e, e); + Assert.assertEquals("08S01", e.getSQLState().trim()); + Throwable cause = e.getCause(); + Assert.assertNotNull(cause); + while (cause.getCause() != null) { + cause = cause.getCause(); + } + Assert.assertEquals("org.apache.http.NoHttpResponseException", cause.getClass().getName()); + Assert.assertEquals("The target server failed to respond", cause.getMessage()); + } + miniHS2.stop(); + } + + /*** * Test SSL client with non-SSL server fails * @throws Exception */ Index: itests/hive-unit-hadoop2/src/test/java/org/apache/hadoop/hive/ql/security/TestPasswordWithCredentialProvider.java =================================================================== --- itests/hive-unit-hadoop2/src/test/java/org/apache/hadoop/hive/ql/security/TestPasswordWithCredentialProvider.java (revision 1637277) +++ itests/hive-unit-hadoop2/src/test/java/org/apache/hadoop/hive/ql/security/TestPasswordWithCredentialProvider.java (working copy) @@ -81,7 +81,7 @@ conf.set("hadoop.security.credential.clear-text-fallback", "true"); // Set up CredentialProvider - conf.set("hadoop.security.credential.provider.path", "jceks://file/" + tmpDir + "/test.jks"); + conf.set("hadoop.security.credential.provider.path", "jceks://file/" + tmpDir.toURI().getPath() + "/test.jks"); // CredentialProvider/CredentialProviderFactory may not exist, depending on the version of // hadoop-2 being used to build Hive. 
Use reflection to do the following lines Index: itests/hive-unit-hadoop2/src/test/java/org/apache/hive/jdbc/TestSchedulerQueue.java =================================================================== --- itests/hive-unit-hadoop2/src/test/java/org/apache/hive/jdbc/TestSchedulerQueue.java (revision 1637277) +++ itests/hive-unit-hadoop2/src/test/java/org/apache/hive/jdbc/TestSchedulerQueue.java (working copy) @@ -21,14 +21,19 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; +import java.io.IOException; import java.sql.Connection; import java.sql.DriverManager; import java.sql.ResultSet; import java.sql.Statement; +import java.util.ArrayList; import java.util.HashMap; +import java.util.List; import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.security.GroupMappingServiceProvider; import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairSchedulerConfiguration; import org.apache.hive.jdbc.miniHS2.MiniHS2; import org.junit.After; import org.junit.Before; @@ -37,6 +42,26 @@ public class TestSchedulerQueue { + // hadoop group mapping that maps user to same group + public static class HiveTestSimpleGroupMapping implements GroupMappingServiceProvider { + public static String primaryTag = ""; + @Override + public List<String> getGroups(String user) throws IOException { + List<String> results = new ArrayList<String>(); + results.add(user + primaryTag); + results.add(user + "-group"); + return results; + } + + @Override + public void cacheGroupsRefresh() throws IOException { + } + + @Override + public void cacheGroupsAdd(List<String> groups) throws IOException { + } + } + private MiniHS2 miniHS2 = null; private static HiveConf conf = new HiveConf(); private Connection hs2Conn = null; @@ -44,6 +69,8 @@ @BeforeClass public static void beforeTest() throws Exception { Class.forName(MiniHS2.getJdbcDriverName()); + conf.set("hadoop.security.group.mapping", + HiveTestSimpleGroupMapping.class.getName()); } @Before @@ -56,6 +83,7 @@ miniHS2.setConfProperty(YarnConfiguration.RM_SCHEDULER, "org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler"); miniHS2.start(new HashMap<String, String>()); + HiveTestSimpleGroupMapping.primaryTag = ""; } @After @@ -79,6 +107,7 @@ @Test public void testFairSchedulerQueueMapping() throws Exception { hs2Conn = DriverManager.getConnection(miniHS2.getJdbcURL(), "user1", "bar"); + verifyProperty(HiveConf.ConfVars.HIVE_SERVER2_ENABLE_DOAS.varname, "false"); verifyProperty("mapreduce.framework.name", "yarn"); verifyProperty(HiveConf.ConfVars.HIVE_SERVER2_MAP_FAIR_SCHEDULER_QUEUE.varname, "true"); @@ -88,6 +117,31 @@ } /** + * Verify: + * Test is running with MR2 and queue mappings are set correctly for the primary group rule. + * @throws Exception + */ + @Test + public void testFairSchedulerPrimaryQueueMapping() throws Exception { + miniHS2.setConfProperty(FairSchedulerConfiguration.ALLOCATION_FILE, "fair-scheduler-test.xml"); + HiveTestSimpleGroupMapping.primaryTag = "-test"; + hs2Conn = DriverManager.getConnection(miniHS2.getJdbcURL(), "user2", "bar"); + verifyProperty("mapreduce.job.queuename", "root.user2" + HiveTestSimpleGroupMapping.primaryTag); + } + + /** + * Verify: + * Test is running with MR2 and queue mappings are set correctly for the secondary group rule.
+ * @throws Exception + */ + @Test + public void testFairSchedulerSecondaryQueueMapping() throws Exception { + miniHS2.setConfProperty(FairSchedulerConfiguration.ALLOCATION_FILE, "fair-scheduler-test.xml"); + hs2Conn = DriverManager.getConnection(miniHS2.getJdbcURL(), "user3", "bar"); + verifyProperty("mapreduce.job.queuename", "root.user3-group"); + } + + /** * Verify that the queue refresh doesn't happen when configured to be off. * * @throws Exception Index: itests/src/test/resources/testconfiguration.properties =================================================================== --- itests/src/test/resources/testconfiguration.properties (revision 1637277) +++ itests/src/test/resources/testconfiguration.properties (working copy) @@ -55,7 +55,20 @@ bucket2.q,\ bucket3.q,\ bucket4.q,\ - cbo_correctness.q,\ + cbo_gby.q,\ + cbo_gby_empty.q,\ + cbo_join.q,\ + cbo_limit.q,\ + cbo_semijoin.q,\ + cbo_simple_select.q,\ + cbo_stats.q,\ + cbo_subq_exists.q,\ + cbo_subq_in.q,\ + cbo_subq_not_in.q,\ + cbo_udf_udaf.q,\ + cbo_union.q,\ + cbo_views.q,\ + cbo_windowing.q,\ correlationoptimizer1.q,\ count.q,\ create_merge_compressed.q,\ @@ -161,9 +174,25 @@ vector_cast_constant.q,\ vector_char_4.q,\ vector_char_simple.q,\ + vector_coalesce.q,\ vector_count_distinct.q,\ vector_data_types.q,\ + vector_decimal_1.q,\ + vector_decimal_10_0.q,\ + vector_decimal_2.q,\ + vector_decimal_3.q,\ + vector_decimal_4.q,\ + vector_decimal_5.q,\ + vector_decimal_6.q,\ vector_decimal_aggregate.q,\ + vector_decimal_cast.q,\ + vector_decimal_expressions.q,\ + vector_decimal_mapjoin.q,\ + vector_decimal_math_funcs.q,\ + vector_decimal_precision.q,\ + vector_decimal_trailing.q,\ + vector_decimal_udf.q,\ + vector_decimal_udf2.q,\ vector_distinct_2.q,\ vector_elt.q,\ vector_groupby_3.q,\ @@ -196,6 +225,7 @@ vectorization_9.q,\ vectorization_decimal_date.q,\ vectorization_div0.q,\ + vectorization_limit.q,\ vectorization_nested_udf.q,\ vectorization_not.q,\ vectorization_part.q,\ @@ -204,7 +234,10 @@ vectorization_short_regress.q,\ vectorized_bucketmapjoin1.q,\ vectorized_case.q,\ + vectorized_casts.q,\ vectorized_context.q,\ + vectorized_date_funcs.q,\ + vectorized_distinct_gby.q,\ vectorized_mapjoin.q,\ vectorized_math_funcs.q,\ vectorized_nested_mapjoin.q,\ Index: itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java =================================================================== --- itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java (revision 1637277) +++ itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java (working copy) @@ -952,7 +952,7 @@ for (Task plan : tasks) { Utilities.serializePlan(plan, ofs, conf); } - + ofs.close(); fixXml4JDK7(outf.getPath()); maskPatterns(xmlPlanMask, outf.getPath()); @@ -964,6 +964,7 @@ return exitVal; } finally { conf.set(HiveConf.ConfVars.PLAN_SERIALIZATION.varname, "kryo"); + IOUtils.closeQuietly(ofs); } } Index: jdbc/pom.xml =================================================================== --- jdbc/pom.xml (revision 1637277) +++ jdbc/pom.xml (working copy) @@ -29,6 +29,7 @@ .. 
+ false @@ -97,6 +98,11 @@ + + org.apache.curator + curator-framework + ${curator.version} + @@ -122,6 +128,12 @@ + + dist + + true + + @@ -138,7 +150,7 @@ shade - true + ${packaging.minimizeJar} true ${hive.jdbc.driver.classifier} @@ -163,6 +175,12 @@ + commons-logging:commons-logging + + ** + + + *:* META-INF/*.SF Index: jdbc/src/java/org/apache/hive/jdbc/HiveConnection.java =================================================================== --- jdbc/src/java/org/apache/hive/jdbc/HiveConnection.java (revision 1637277) +++ jdbc/src/java/org/apache/hive/jdbc/HiveConnection.java (working copy) @@ -99,6 +99,7 @@ private JdbcConnectionParams connParams; private final boolean isEmbeddedMode; private TTransport transport; + private boolean assumeSubject; // TODO should be replaced by CliServiceClient private TCLIService.Iface client; private boolean isClosed = true; @@ -177,6 +178,9 @@ private void openTransport() throws SQLException { while (true) { try { + assumeSubject = + JdbcConnectionParams.AUTH_KERBEROS_AUTH_TYPE_FROM_SUBJECT.equals(sessConfMap + .get(JdbcConnectionParams.AUTH_KERBEROS_AUTH_TYPE)); transport = isHttpTransportMode() ? createHttpTransport() : createBinaryTransport(); if (!transport.isOpen()) { LOG.info("Will try to open client transport with JDBC Uri: " + jdbcUriString); @@ -265,8 +269,9 @@ * In https mode, the entire information is encrypted * TODO: Optimize this with a mix of kerberos + using cookie. */ - requestInterceptor = new HttpKerberosRequestInterceptor( - sessConfMap.get(JdbcConnectionParams.AUTH_PRINCIPAL), host, getServerHttpUrl(false)); + requestInterceptor = + new HttpKerberosRequestInterceptor(sessConfMap.get(JdbcConnectionParams.AUTH_PRINCIPAL), + host, getServerHttpUrl(useSsl), assumeSubject); } else { /** @@ -351,8 +356,6 @@ } saslProps.put(Sasl.QOP, saslQOP.toString()); saslProps.put(Sasl.SERVER_AUTH, "true"); - boolean assumeSubject = JdbcConnectionParams.AUTH_KERBEROS_AUTH_TYPE_FROM_SUBJECT.equals(sessConfMap - .get(JdbcConnectionParams.AUTH_KERBEROS_AUTH_TYPE)); transport = KerberosSaslHelper.getKerberosTransport( sessConfMap.get(JdbcConnectionParams.AUTH_PRINCIPAL), host, HiveAuthFactory.getSocketTransport(host, port, loginTimeout), saslProps, Index: jdbc/src/java/org/apache/hive/jdbc/HttpKerberosRequestInterceptor.java =================================================================== --- jdbc/src/java/org/apache/hive/jdbc/HttpKerberosRequestInterceptor.java (revision 1637277) +++ jdbc/src/java/org/apache/hive/jdbc/HttpKerberosRequestInterceptor.java (working copy) @@ -39,15 +39,17 @@ String principal; String host; String serverHttpUrl; + boolean assumeSubject; // A fair reentrant lock private static ReentrantLock kerberosLock = new ReentrantLock(true); public HttpKerberosRequestInterceptor(String principal, String host, - String serverHttpUrl) { + String serverHttpUrl, boolean assumeSubject) { this.principal = principal; this.host = host; this.serverHttpUrl = serverHttpUrl; + this.assumeSubject = assumeSubject; } @Override @@ -59,7 +61,7 @@ // Locking ensures the tokens are unique in case of concurrent requests kerberosLock.lock(); kerberosAuthHeader = HttpAuthUtils.getKerberosServiceTicket( - principal, host, serverHttpUrl); + principal, host, serverHttpUrl, assumeSubject); // Set the session key token (Base64 encoded) in the headers httpRequest.addHeader(HttpAuthUtils.AUTHORIZATION + ": " + HttpAuthUtils.NEGOTIATE + " ", kerberosAuthHeader); Index: jdbc/src/java/org/apache/hive/jdbc/Utils.java 
=================================================================== --- jdbc/src/java/org/apache/hive/jdbc/Utils.java (revision 1637277) +++ jdbc/src/java/org/apache/hive/jdbc/Utils.java (working copy) @@ -100,8 +100,6 @@ static final String ZOOKEEPER_DEFAULT_NAMESPACE = "hiveserver2"; // Non-configurable params: - // ZOOKEEPER_SESSION_TIMEOUT is not exposed as client configurable - static final int ZOOKEEPER_SESSION_TIMEOUT = 600 * 1000; // Currently supports JKS keystore format static final String SSL_TRUST_STORE_TYPE = "JKS"; Index: jdbc/src/java/org/apache/hive/jdbc/ZooKeeperHiveClientHelper.java =================================================================== --- jdbc/src/java/org/apache/hive/jdbc/ZooKeeperHiveClientHelper.java (revision 1637277) +++ jdbc/src/java/org/apache/hive/jdbc/ZooKeeperHiveClientHelper.java (working copy) @@ -25,9 +25,11 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.curator.framework.CuratorFramework; +import org.apache.curator.framework.CuratorFrameworkFactory; +import org.apache.curator.retry.ExponentialBackoffRetry; import org.apache.hive.jdbc.Utils.JdbcConnectionParams; import org.apache.zookeeper.Watcher; -import org.apache.zookeeper.ZooKeeper; public class ZooKeeperHiveClientHelper { public static final Log LOG = LogFactory.getLog(ZooKeeperHiveClientHelper.class.getName()); @@ -59,14 +61,12 @@ List serverHosts; Random randomizer = new Random(); String serverNode; - ZooKeeper zooKeeperClient = null; - // Pick a random HiveServer2 host from the ZooKeeper namspace + CuratorFramework zooKeeperClient = + CuratorFrameworkFactory.builder().connectString(zooKeeperEnsemble) + .retryPolicy(new ExponentialBackoffRetry(1000, 3)).build(); + zooKeeperClient.start(); try { - zooKeeperClient = - new ZooKeeper(zooKeeperEnsemble, JdbcConnectionParams.ZOOKEEPER_SESSION_TIMEOUT, - new ZooKeeperHiveClientHelper.DummyWatcher()); - // All the HiveServer2 host nodes that are in ZooKeeper currently - serverHosts = zooKeeperClient.getChildren("/" + zooKeeperNamespace, false); + serverHosts = zooKeeperClient.getChildren().forPath("/" + zooKeeperNamespace); // Remove the znodes we've already tried from this list serverHosts.removeAll(connParams.getRejectedHostZnodePaths()); if (serverHosts.isEmpty()) { @@ -76,22 +76,18 @@ // Now pick a host randomly serverNode = serverHosts.get(randomizer.nextInt(serverHosts.size())); connParams.setCurrentHostZnodePath(serverNode); - // Read the value from the node (UTF-8 enoded byte array) and convert it to a String String serverUri = - new String(zooKeeperClient.getData("/" + zooKeeperNamespace + "/" + serverNode, false, - null), Charset.forName("UTF-8")); + new String( + zooKeeperClient.getData().forPath("/" + zooKeeperNamespace + "/" + serverNode), + Charset.forName("UTF-8")); LOG.info("Selected HiveServer2 instance with uri: " + serverUri); return serverUri; } catch (Exception e) { throw new ZooKeeperHiveClientException("Unable to read HiveServer2 uri from ZooKeeper", e); } finally { - // Try to close the client connection with ZooKeeper + // Close the client connection with ZooKeeper if (zooKeeperClient != null) { - try { - zooKeeperClient.close(); - } catch (Exception e) { - // No-op - } + zooKeeperClient.close(); } } } Index: metastore/scripts/upgrade/mssql/002-HIVE-7784.mssql.sql =================================================================== --- metastore/scripts/upgrade/mssql/002-HIVE-7784.mssql.sql (revision 1637277) +++ 
metastore/scripts/upgrade/mssql/002-HIVE-7784.mssql.sql (working copy) @@ -1 +1,32 @@ +-- +-- Create the table if it doesn't exist. +-- +if not exists (SELECT 1 FROM INFORMATION_SCHEMA.TABLES + WHERE TABLE_NAME='PART_COL_STATS') +CREATE TABLE PART_COL_STATS +( + CS_ID bigint NOT NULL, + AVG_COL_LEN float NULL, + "COLUMN_NAME" varchar(128) NOT NULL, + COLUMN_TYPE varchar(128) NOT NULL, + DB_NAME varchar(128) NOT NULL, + BIG_DECIMAL_HIGH_VALUE varchar(255) NULL, + BIG_DECIMAL_LOW_VALUE varchar(255) NULL, + DOUBLE_HIGH_VALUE float NULL, + DOUBLE_LOW_VALUE float NULL, + LAST_ANALYZED bigint NOT NULL, + LONG_HIGH_VALUE bigint NULL, + LONG_LOW_VALUE bigint NULL, + MAX_COL_LEN bigint NULL, + NUM_DISTINCTS bigint NULL, + NUM_FALSES bigint NULL, + NUM_NULLS bigint NOT NULL, + NUM_TRUES bigint NULL, + PART_ID bigint NULL, + PARTITION_NAME varchar(767) NOT NULL, + "TABLE_NAME" varchar(128) NOT NULL +) +go + + CREATE INDEX PCS_STATS_IDX ON PART_COL_STATS (DB_NAME,TABLE_NAME,COLUMN_NAME,PARTITION_NAME); Index: metastore/scripts/upgrade/mssql/004-HIVE-8550.mssql.sql =================================================================== --- metastore/scripts/upgrade/mssql/004-HIVE-8550.mssql.sql (revision 0) +++ metastore/scripts/upgrade/mssql/004-HIVE-8550.mssql.sql (working copy) @@ -0,0 +1,13 @@ +--ALTER PARTITIONS.PART_NAME from varchar to nvarchar +DROP INDEX PARTITIONS.UNIQUEPARTITION; +ALTER TABLE PARTITIONS ALTER COLUMN PART_NAME NVARCHAR(767) NULL; +CREATE UNIQUE INDEX UNIQUEPARTITION ON PARTITIONS (PART_NAME,TBL_ID); +GO + +--ALTER SDS.LOCATION from varchar to nvarchar +ALTER TABLE SDS ALTER COLUMN LOCATION NVARCHAR(4000) NULL; +GO + +--ALTER PARTITION_KEY_VALS.PART_KEY_VAL from varchar to nvarchar +ALTER TABLE PARTITION_KEY_VALS ALTER COLUMN PART_KEY_VAL NVARCHAR(255) NULL; +GO \ No newline at end of file Index: metastore/scripts/upgrade/mssql/hive-schema-0.14.0.mssql.sql =================================================================== --- metastore/scripts/upgrade/mssql/hive-schema-0.14.0.mssql.sql (revision 1637277) +++ metastore/scripts/upgrade/mssql/hive-schema-0.14.0.mssql.sql (working copy) @@ -140,7 +140,7 @@ PART_ID bigint NOT NULL, CREATE_TIME int NOT NULL, LAST_ACCESS_TIME int NOT NULL, - PART_NAME varchar(767) NULL, + PART_NAME nvarchar(767) NULL, SD_ID bigint NULL, TBL_ID bigint NULL ); @@ -371,7 +371,7 @@ INPUT_FORMAT varchar(4000) NULL, IS_COMPRESSED bit NOT NULL, IS_STOREDASSUBDIRECTORIES bit NOT NULL, - LOCATION varchar(4000) NULL, + LOCATION nvarchar(4000) NULL, NUM_BUCKETS int NOT NULL, OUTPUT_FORMAT varchar(4000) NULL, SERDE_ID bigint NULL @@ -437,7 +437,7 @@ CREATE TABLE PARTITION_KEY_VALS ( PART_ID bigint NOT NULL, - PART_KEY_VAL varchar(255) NULL, + PART_KEY_VAL nvarchar(255) NULL, INTEGER_IDX int NOT NULL ); Index: metastore/scripts/upgrade/mssql/hive-schema-0.15.0.mssql.sql =================================================================== --- metastore/scripts/upgrade/mssql/hive-schema-0.15.0.mssql.sql (revision 1637277) +++ metastore/scripts/upgrade/mssql/hive-schema-0.15.0.mssql.sql (working copy) @@ -140,7 +140,7 @@ PART_ID bigint NOT NULL, CREATE_TIME int NOT NULL, LAST_ACCESS_TIME int NOT NULL, - PART_NAME varchar(767) NULL, + PART_NAME nvarchar(767) NULL, SD_ID bigint NULL, TBL_ID bigint NULL ); @@ -371,7 +371,7 @@ INPUT_FORMAT varchar(4000) NULL, IS_COMPRESSED bit NOT NULL, IS_STOREDASSUBDIRECTORIES bit NOT NULL, - LOCATION varchar(4000) NULL, + LOCATION nvarchar(4000) NULL, NUM_BUCKETS int NOT NULL, OUTPUT_FORMAT varchar(4000) NULL, SERDE_ID bigint NULL @@ 
-437,7 +437,7 @@ CREATE TABLE PARTITION_KEY_VALS ( PART_ID bigint NOT NULL, - PART_KEY_VAL varchar(255) NULL, + PART_KEY_VAL nvarchar(255) NULL, INTEGER_IDX int NOT NULL ); Index: metastore/scripts/upgrade/mysql/019-HIVE-7784.mysql.sql =================================================================== --- metastore/scripts/upgrade/mysql/019-HIVE-7784.mysql.sql (revision 1637277) +++ metastore/scripts/upgrade/mysql/019-HIVE-7784.mysql.sql (working copy) @@ -1 +1,31 @@ +-- +-- Create the table if it doesn't exist. +-- +CREATE TABLE IF NOT EXISTS `PART_COL_STATS` ( + `CS_ID` bigint(20) NOT NULL, + `DB_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, + `TABLE_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, + `PARTITION_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, + `COLUMN_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, + `COLUMN_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, + `PART_ID` bigint(20) NOT NULL, + `LONG_LOW_VALUE` bigint(20), + `LONG_HIGH_VALUE` bigint(20), + `DOUBLE_HIGH_VALUE` double(53,4), + `DOUBLE_LOW_VALUE` double(53,4), + `BIG_DECIMAL_LOW_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin, + `BIG_DECIMAL_HIGH_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin, + `NUM_NULLS` bigint(20) NOT NULL, + `NUM_DISTINCTS` bigint(20), + `AVG_COL_LEN` double(53,4), + `MAX_COL_LEN` bigint(20), + `NUM_TRUES` bigint(20), + `NUM_FALSES` bigint(20), + `LAST_ANALYZED` bigint(20) NOT NULL, + PRIMARY KEY (`CS_ID`), + CONSTRAINT `PART_COL_STATS_FK` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; + + + CREATE INDEX PCS_STATS_IDX ON PART_COL_STATS (DB_NAME,TABLE_NAME,COLUMN_NAME,PARTITION_NAME) USING BTREE; Index: metastore/scripts/upgrade/oracle/020-HIVE-7784.oracle.sql =================================================================== --- metastore/scripts/upgrade/oracle/020-HIVE-7784.oracle.sql (revision 1637277) +++ metastore/scripts/upgrade/oracle/020-HIVE-7784.oracle.sql (working copy) @@ -1 +1,27 @@ +-- +-- Create the table if it doesn't exist. +-- +CREATE TABLE IF NOT EXISTS PART_COL_STATS ( + CS_ID NUMBER NOT NULL, + DB_NAME VARCHAR2(128) NOT NULL, + TABLE_NAME VARCHAR2(128) NOT NULL, + PARTITION_NAME VARCHAR2(767) NOT NULL, + COLUMN_NAME VARCHAR2(128) NOT NULL, + COLUMN_TYPE VARCHAR2(128) NOT NULL, + PART_ID NUMBER NOT NULL, + LONG_LOW_VALUE NUMBER, + LONG_HIGH_VALUE NUMBER, + DOUBLE_LOW_VALUE NUMBER, + DOUBLE_HIGH_VALUE NUMBER, + BIG_DECIMAL_LOW_VALUE VARCHAR2(4000), + BIG_DECIMAL_HIGH_VALUE VARCHAR2(4000), + NUM_NULLS NUMBER NOT NULL, + NUM_DISTINCTS NUMBER, + AVG_COL_LEN NUMBER, + MAX_COL_LEN NUMBER, + NUM_TRUES NUMBER, + NUM_FALSES NUMBER, + LAST_ANALYZED NUMBER NOT NULL +); + CREATE INDEX PCS_STATS_IDX ON PART_COL_STATS (DB_NAME,TABLE_NAME,COLUMN_NAME,PARTITION_NAME); Index: metastore/scripts/upgrade/postgres/019-HIVE-7784.postgres.sql =================================================================== --- metastore/scripts/upgrade/postgres/019-HIVE-7784.postgres.sql (revision 1637277) +++ metastore/scripts/upgrade/postgres/019-HIVE-7784.postgres.sql (working copy) @@ -1 +1,29 @@ +-- +-- Create the table if it doesn't exist. 
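+-- Note: CREATE TABLE IF NOT EXISTS requires PostgreSQL 9.1 or later.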
+-- + +CREATE TABLE IF NOT EXISTS "PART_COL_STATS" ( + "CS_ID" bigint NOT NULL, + "DB_NAME" character varying(128) DEFAULT NULL::character varying, + "TABLE_NAME" character varying(128) DEFAULT NULL::character varying, + "PARTITION_NAME" character varying(767) DEFAULT NULL::character varying, + "COLUMN_NAME" character varying(128) DEFAULT NULL::character varying, + "COLUMN_TYPE" character varying(128) DEFAULT NULL::character varying, + "PART_ID" bigint NOT NULL, + "LONG_LOW_VALUE" bigint, + "LONG_HIGH_VALUE" bigint, + "DOUBLE_LOW_VALUE" double precision, + "DOUBLE_HIGH_VALUE" double precision, + "BIG_DECIMAL_LOW_VALUE" character varying(4000) DEFAULT NULL::character varying, + "BIG_DECIMAL_HIGH_VALUE" character varying(4000) DEFAULT NULL::character varying, + "NUM_NULLS" bigint NOT NULL, + "NUM_DISTINCTS" bigint, + "AVG_COL_LEN" double precision, + "MAX_COL_LEN" bigint, + "NUM_TRUES" bigint, + "NUM_FALSES" bigint, + "LAST_ANALYZED" bigint NOT NULL +); + + CREATE INDEX "PCS_STATS_IDX" ON "PART_COL_STATS" USING btree ("DB_NAME","TABLE_NAME","COLUMN_NAME","PARTITION_NAME"); Index: metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java =================================================================== --- metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java (revision 1637277) +++ metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java (working copy) @@ -5773,6 +5773,7 @@ // Server will create new threads up to max as necessary. After an idle // period, it will destroy threads to keep the number of threads in the // pool to min. + int maxMessageSize = conf.getIntVar(HiveConf.ConfVars.METASTORESERVERMAXMESSAGESIZE); int minWorkerThreads = conf.getIntVar(HiveConf.ConfVars.METASTORESERVERMINTHREADS); int maxWorkerThreads = conf.getIntVar(HiveConf.ConfVars.METASTORESERVERMAXTHREADS); boolean tcpKeepAlive = conf.getBoolVar(HiveConf.ConfVars.METASTORE_TCP_KEEP_ALIVE); @@ -5824,6 +5825,7 @@ .processor(processor) .transportFactory(transFactory) .protocolFactory(new TBinaryProtocol.Factory()) + .inputProtocolFactory(new TBinaryProtocol.Factory(true, true, maxMessageSize)) .minWorkerThreads(minWorkerThreads) .maxWorkerThreads(maxWorkerThreads); Index: metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java =================================================================== --- metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java (revision 1637277) +++ metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java (working copy) @@ -48,7 +48,6 @@ import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.MetaException; -import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; import org.apache.hadoop.hive.metastore.api.Order; import org.apache.hadoop.hive.metastore.api.Partition; import org.apache.hadoop.hive.metastore.api.PrincipalType; @@ -151,18 +150,17 @@ * here - for eg., for MySQL, we signal that we want to use ANSI SQL quoting behaviour */ private void doDbSpecificInitializationsBeforeQuery() throws MetaException { - if (isMySql){ - try { - assert pm.currentTransaction().isActive(); // must be inside tx together with queries - trySetAnsiQuotesForMysql(); - } catch (SQLException sqlEx) { - throw new MetaException("Error setting ansi quotes: " + sqlEx.getMessage()); - } + if (!isMySql) return; + try { + assert pm.currentTransaction().isActive(); // must be inside tx together with queries + 
trySetAnsiQuotesForMysql(); + } catch (SQLException sqlEx) { + throw new MetaException("Error setting ansi quotes: " + sqlEx.getMessage()); } } /** - * MySQL, by default, doesn't recognize ANSI quotes which need to have for Postgres. + * MySQL, by default, doesn't recognize ANSI quotes which we need to have for Postgres. * Try to set the ANSI quotes mode on for the session. Due to connection pooling, needs * to be called in the same transaction as the actual queries. */ @@ -194,18 +192,20 @@ Object[] params = new Object[] { dbName }; queryDbSelector = pm.newQuery("javax.jdo.query.SQL", queryTextDbSelector); - LOG.debug("getDatabase:query instantiated : " + queryTextDbSelector + " with param ["+params[0]+"]"); + if (LOG.isTraceEnabled()) { + LOG.trace("getDatabase:query instantiated : " + queryTextDbSelector + + " with param [" + params[0] + "]"); + } + @SuppressWarnings("unchecked") List sqlResult = (List)queryDbSelector.executeWithArray(params); if ((sqlResult == null) || sqlResult.isEmpty()) { - LOG.debug("getDatabase:queryDbSelector ran, returned no/empty results, returning NoSuchObjectException"); - throw new MetaException("There is no database named " + dbName); + return null; } assert(sqlResult.size() == 1); - if (sqlResult.get(0) == null){ - LOG.debug("getDatabase:queryDbSelector ran, returned results, but the result entry was null, returning NoSuchObjectException"); - throw new MetaException("There is no database named " + dbName); + if (sqlResult.get(0) == null) { + return null; } Object[] dbline = sqlResult.get(0); @@ -215,25 +215,28 @@ + " FROM \"DATABASE_PARAMS\" " + " WHERE \"DB_ID\" = ? " + " AND \"PARAM_KEY\" IS NOT NULL"; - Object[] params2 = new Object[] { dbid }; - queryDbParams = pm.newQuery("javax.jdo.query.SQL",queryTextDbParams); - LOG.debug("getDatabase:query2 instantiated : " + queryTextDbParams + " with param ["+params2[0]+"]"); + params[0] = dbid; + queryDbParams = pm.newQuery("javax.jdo.query.SQL", queryTextDbParams); + if (LOG.isTraceEnabled()) { + LOG.trace("getDatabase:query2 instantiated : " + queryTextDbParams + + " with param [" + params[0] + "]"); + } Map dbParams = new HashMap(); - List sqlResult2 = ensureList(queryDbParams.executeWithArray(params2)); - if (!sqlResult2.isEmpty()){ - for (Object[] line : sqlResult2){ + List sqlResult2 = ensureList(queryDbParams.executeWithArray(params)); + if (!sqlResult2.isEmpty()) { + for (Object[] line : sqlResult2) { dbParams.put(extractSqlString(line[0]),extractSqlString(line[1])); } } - LOG.debug("getDatabase: instantiating db object to return"); Database db = new Database(); db.setName(extractSqlString(dbline[1])); db.setLocationUri(extractSqlString(dbline[2])); db.setDescription(extractSqlString(dbline[3])); db.setOwnerName(extractSqlString(dbline[4])); String type = extractSqlString(dbline[5]); - db.setOwnerType((null == type || type.trim().isEmpty()) ? null : PrincipalType.valueOf(type)); + db.setOwnerType( + (null == type || type.trim().isEmpty()) ? 
null : PrincipalType.valueOf(type)); db.setParameters(dbParams); if (LOG.isDebugEnabled()){ LOG.debug("getDatabase: directsql returning db " + db.getName() Index: metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java =================================================================== --- metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java (revision 1637277) +++ metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java (working copy) @@ -521,17 +521,22 @@ @Override public Database getDatabase(String name) throws NoSuchObjectException { + MetaException ex = null; + Database db = null; try { - return getDatabaseInternal(name); + db = getDatabaseInternal(name); } catch (MetaException e) { // Signature restriction to NSOE, and NSOE being a flat exception prevents us from // setting the cause of the NSOE as the MetaException. We should not lose the info // we got here, but it's very likely that the MetaException is irrelevant and is // actually an NSOE message, so we should log it and throw an NSOE with the msg. - LOG.warn("Got a MetaException trying to call getDatabase(" - +name+"), returning NoSuchObjectException", e); - throw new NoSuchObjectException(e.getMessage()); + ex = e; } + if (db == null) { + LOG.warn("Failed to get database " + name +", returning NoSuchObjectException", ex); + throw new NoSuchObjectException(name + (ex == null ? "" : (": " + ex.getMessage()))); + } + return db; } public Database getDatabaseInternal(String name) throws MetaException, NoSuchObjectException { @@ -2375,7 +2380,7 @@ } private void handleDirectSqlError(Exception ex) throws MetaException, NoSuchObjectException { - LOG.error("Direct SQL failed" + (allowJdo ? ", falling back to ORM" : ""), ex); + LOG.warn("Direct SQL failed" + (allowJdo ? 
", falling back to ORM" : ""), ex); if (!allowJdo) { if (ex instanceof MetaException) { throw (MetaException)ex; Index: metastore/src/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java =================================================================== --- metastore/src/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java (revision 1637277) +++ metastore/src/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java (working copy) @@ -127,7 +127,7 @@ dbConn.rollback(); } catch (SQLException e1) { } - detectDeadlock(e, "setRunAs"); + detectDeadlock(dbConn, e, "setRunAs"); } finally { closeDbConn(dbConn); closeStmt(stmt); @@ -192,7 +192,7 @@ dbConn.rollback(); } catch (SQLException e1) { } - detectDeadlock(e, "findNextToCompact"); + detectDeadlock(dbConn, e, "findNextToCompact"); throw new MetaException("Unable to connect to transaction database " + StringUtils.stringifyException(e)); } finally { @@ -234,7 +234,7 @@ dbConn.rollback(); } catch (SQLException e1) { } - detectDeadlock(e, "markCompacted"); + detectDeadlock(dbConn, e, "markCompacted"); throw new MetaException("Unable to connect to transaction database " + StringUtils.stringifyException(e)); } finally { @@ -377,7 +377,7 @@ dbConn.rollback(); } catch (SQLException e1) { } - detectDeadlock(e, "markCleaned"); + detectDeadlock(dbConn, e, "markCleaned"); throw new MetaException("Unable to connect to transaction database " + StringUtils.stringifyException(e)); } finally { @@ -429,7 +429,7 @@ dbConn.rollback(); } catch (SQLException e1) { } - detectDeadlock(e, "cleanEmptyAbortedTxns"); + detectDeadlock(dbConn, e, "cleanEmptyAbortedTxns"); throw new MetaException("Unable to connect to transaction database " + StringUtils.stringifyException(e)); } finally { @@ -475,7 +475,7 @@ dbConn.rollback(); } catch (SQLException e1) { } - detectDeadlock(e, "revokeFromLocalWorkers"); + detectDeadlock(dbConn, e, "revokeFromLocalWorkers"); throw new MetaException("Unable to connect to transaction database " + StringUtils.stringifyException(e)); } finally { @@ -522,7 +522,7 @@ dbConn.rollback(); } catch (SQLException e1) { } - detectDeadlock(e, "revokeTimedoutWorkers"); + detectDeadlock(dbConn, e, "revokeTimedoutWorkers"); throw new MetaException("Unable to connect to transaction database " + StringUtils.stringifyException(e)); } finally { Index: metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java =================================================================== --- metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java (revision 1637277) +++ metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java (working copy) @@ -65,13 +65,13 @@ static final protected char TXN_OPEN = 'o'; // Lock states - static final private char LOCK_ACQUIRED = 'a'; - static final private char LOCK_WAITING = 'w'; + static final protected char LOCK_ACQUIRED = 'a'; + static final protected char LOCK_WAITING = 'w'; // Lock types - static final private char LOCK_EXCLUSIVE = 'e'; - static final private char LOCK_SHARED = 'r'; - static final private char LOCK_SEMI_SHARED = 'w'; + static final protected char LOCK_EXCLUSIVE = 'e'; + static final protected char LOCK_SHARED = 'r'; + static final protected char LOCK_SEMI_SHARED = 'w'; static final private int ALLOWED_REPEATED_DEADLOCKS = 5; static final private Log LOG = LogFactory.getLog(TxnHandler.class.getName()); @@ -301,7 +301,7 @@ dbConn.rollback(); } catch (SQLException e1) { } - detectDeadlock(e, "openTxns"); + detectDeadlock(dbConn, e, 
"openTxns"); throw new MetaException("Unable to select from transaction database " + StringUtils.stringifyException(e)); } finally { @@ -336,7 +336,7 @@ dbConn.rollback(); } catch (SQLException e1) { } - detectDeadlock(e, "abortTxn"); + detectDeadlock(dbConn, e, "abortTxn"); throw new MetaException("Unable to update transaction database " + StringUtils.stringifyException(e)); } finally { @@ -393,7 +393,7 @@ dbConn.rollback(); } catch (SQLException e1) { } - detectDeadlock(e, "commitTxn"); + detectDeadlock(dbConn, e, "commitTxn"); throw new MetaException("Unable to update transaction database " + StringUtils.stringifyException(e)); } finally { @@ -419,7 +419,7 @@ dbConn.rollback(); } catch (SQLException e1) { } - detectDeadlock(e, "lock"); + detectDeadlock(dbConn, e, "lock"); throw new MetaException("Unable to update transaction database " + StringUtils.stringifyException(e)); } finally { @@ -444,7 +444,7 @@ dbConn.rollback(); } catch (SQLException e1) { } - detectDeadlock(e, "lockNoWait"); + detectDeadlock(dbConn, e, "lockNoWait"); throw new MetaException("Unable to update transaction database " + StringUtils.stringifyException(e)); } finally { @@ -479,7 +479,7 @@ dbConn.rollback(); } catch (SQLException e1) { } - detectDeadlock(e, "checkLock"); + detectDeadlock(dbConn, e, "checkLock"); throw new MetaException("Unable to update transaction database " + StringUtils.stringifyException(e)); } finally { @@ -534,7 +534,7 @@ dbConn.rollback(); } catch (SQLException e1) { } - detectDeadlock(e, "unlock"); + detectDeadlock(dbConn, e, "unlock"); throw new MetaException("Unable to update transaction database " + StringUtils.stringifyException(e)); } finally { @@ -613,7 +613,7 @@ dbConn.rollback(); } catch (SQLException e1) { } - detectDeadlock(e, "heartbeat"); + detectDeadlock(dbConn, e, "heartbeat"); throw new MetaException("Unable to select from transaction database " + StringUtils.stringifyException(e)); } finally { @@ -652,7 +652,7 @@ dbConn.rollback(); } catch (SQLException e1) { } - detectDeadlock(e, "heartbeatTxnRange"); + detectDeadlock(dbConn, e, "heartbeatTxnRange"); throw new MetaException("Unable to select from transaction database " + StringUtils.stringifyException(e)); } finally { @@ -735,7 +735,7 @@ dbConn.rollback(); } catch (SQLException e1) { } - detectDeadlock(e, "compact"); + detectDeadlock(dbConn, e, "compact"); throw new MetaException("Unable to select from transaction database " + StringUtils.stringifyException(e)); } finally { @@ -898,15 +898,30 @@ * Determine if an exception was a deadlock. Unfortunately there is no standard way to do * this, so we have to inspect the error messages and catch the telltale signs for each * different database. + * @param conn database connection * @param e exception that was thrown. * @param caller name of the method calling this * @throws org.apache.hadoop.hive.metastore.txn.TxnHandler.DeadlockException when deadlock * detected and retry count has not been exceeded. */ - protected void detectDeadlock(SQLException e, String caller) throws DeadlockException { - final String mysqlDeadlock = - "Deadlock found when trying to get lock; try restarting transaction"; - if (e.getMessage().contains(mysqlDeadlock) || e instanceof SQLTransactionRollbackException) { + protected void detectDeadlock(Connection conn, + SQLException e, + String caller) throws DeadlockException, MetaException { + + // If you change this function, remove the @Ignore from TestTxnHandler.deadlockIsDetected() + // to test these changes. 
+ // MySQL and MSSQL use 40001 as the state code for rollback. Postgres uses 40001 and 40P01. + // Oracle seems to return different SQLStates each time, but the message always contains + // "deadlock detected", so I've used that instead. + // Derby and newer MySQL drivers use the new SQLTransactionRollbackException + if (dbProduct == null) { + determineDatabaseProduct(conn); + } + if (e instanceof SQLTransactionRollbackException || + ((dbProduct == DatabaseProduct.MYSQL || dbProduct == DatabaseProduct.POSTGRES || + dbProduct == DatabaseProduct.SQLSERVER) && e.getSQLState().equals("40001")) || + (dbProduct == DatabaseProduct.POSTGRES && e.getSQLState().equals("40P01")) || + (dbProduct == DatabaseProduct.ORACLE && (e.getMessage().contains("deadlock detected")))) { if (deadlockCnt++ < ALLOWED_REPEATED_DEADLOCKS) { LOG.warn("Deadlock detected in " + caller + ", trying again."); throw new DeadlockException(); Index: metastore/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnHandler.java =================================================================== --- metastore/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnHandler.java (revision 1637277) +++ metastore/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnHandler.java (working copy) @@ -20,13 +20,18 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.metastore.MetaStoreThread; import org.apache.hadoop.hive.metastore.api.*; import org.apache.log4j.Level; import org.apache.log4j.LogManager; import org.junit.After; import org.junit.Before; +import org.junit.Ignore; import org.junit.Test; +import java.sql.Connection; +import java.sql.SQLException; +import java.sql.Statement; import java.util.ArrayList; import java.util.List; import java.util.concurrent.TimeUnit; @@ -1082,6 +1087,115 @@ for (int i = 0; i < saw.length; i++) assertTrue("Didn't see lock id " + i, saw[i]); } + @Test + @Ignore + public void deadlockDetected() throws Exception { + Connection conn = txnHandler.getDbConn(Connection.TRANSACTION_SERIALIZABLE); + Statement stmt = conn.createStatement(); + long now = txnHandler.getDbTime(conn); + stmt.executeUpdate("insert into TXNS (txn_id, txn_state, txn_started, txn_last_heartbeat, " + + "txn_user, txn_host) values (1, 'o', " + now + ", " + now + ", 'shagy', " + + "'scooby.com')"); + stmt.executeUpdate("insert into HIVE_LOCKS (hl_lock_ext_id, hl_lock_int_id, hl_txnid, " + + "hl_db, hl_table, hl_partition, hl_lock_state, hl_lock_type, hl_last_heartbeat, " + + "hl_user, hl_host) values (1, 1, 1, 'mydb', 'mytable', 'mypartition', '" + + txnHandler.LOCK_WAITING + "', '" + txnHandler.LOCK_EXCLUSIVE + "', " + now + ", 'fred', " + + "'scooby.com')"); + conn.commit(); + txnHandler.closeDbConn(conn); + + final MetaStoreThread.BooleanPointer sawDeadlock = new MetaStoreThread.BooleanPointer(); + + final Connection conn1 = txnHandler.getDbConn(Connection.TRANSACTION_SERIALIZABLE); + final Connection conn2 = txnHandler.getDbConn(Connection.TRANSACTION_SERIALIZABLE); + try { + + for (int i = 0; i < 5; i++) { + Thread t1 = new Thread() { + @Override + public void run() { + try { + try { + updateTxns(conn1); + updateLocks(conn1); + Thread.sleep(1000); + conn1.commit(); + LOG.debug("no exception, no deadlock"); + } catch (SQLException e) { + try { + txnHandler.detectDeadlock(conn1, e, "thread t1"); + LOG.debug("Got an exception, but not a deadlock, SQLState is " + + e.getSQLState() + " class of exception is " + e.getClass().getName()
+ + " msg is <" + e.getMessage() + ">"); + } catch (TxnHandler.DeadlockException de) { + LOG.debug("Forced a deadlock, SQLState is " + e.getSQLState() + " class of " + + "exception is " + e.getClass().getName() + " msg is <" + e + .getMessage() + ">"); + sawDeadlock.boolVal = true; + } + } + conn1.rollback(); + } catch (Exception e) { + throw new RuntimeException(e); + } + } + }; + + Thread t2 = new Thread() { + @Override + public void run() { + try { + try { + updateLocks(conn2); + updateTxns(conn2); + Thread.sleep(1000); + conn2.commit(); + LOG.debug("no exception, no deadlock"); + } catch (SQLException e) { + try { + txnHandler.detectDeadlock(conn2, e, "thread t2"); + LOG.debug("Got an exception, but not a deadlock, SQLState is " + + e.getSQLState() + " class of exception is " + e.getClass().getName() + + " msg is <" + e.getMessage() + ">"); + } catch (TxnHandler.DeadlockException de) { + LOG.debug("Forced a deadlock, SQLState is " + e.getSQLState() + " class of " + + "exception is " + e.getClass().getName() + " msg is <" + e + .getMessage() + ">"); + sawDeadlock.boolVal = true; + } + } + conn2.rollback(); + } catch (Exception e) { + throw new RuntimeException(e); + } + } + }; + + t1.start(); + t2.start(); + t1.join(); + t2.join(); + if (sawDeadlock.boolVal) break; + } + assertTrue(sawDeadlock.boolVal); + } finally { + conn1.rollback(); + txnHandler.closeDbConn(conn1); + conn2.rollback(); + txnHandler.closeDbConn(conn2); + } + } + + private void updateTxns(Connection conn) throws SQLException { + Statement stmt = conn.createStatement(); + stmt.executeUpdate("update TXNS set txn_last_heartbeat = txn_last_heartbeat + 1"); + } + + private void updateLocks(Connection conn) throws SQLException { + Statement stmt = conn.createStatement(); + stmt.executeUpdate("update HIVE_LOCKS set hl_last_heartbeat = hl_last_heartbeat + 1"); + } + @Before public void setUp() throws Exception { TxnDbUtil.prepDb(); Index: pom.xml =================================================================== --- pom.xml (revision 1637277) +++ pom.xml (working copy) @@ -130,6 +130,7 @@ 7.6.0.v20120127 1.14 0.9.94 + 1.11 1.1 3.5.2 20090211 @@ -350,6 +351,11 @@ ${jline.version} + org.fusesource.jansi + jansi + ${jansi.version} + + junit junit ${junit.version} @@ -487,13 +493,12 @@ - - org.apache.curator - curator-framework - ${curator.version} - - - + + org.apache.curator + curator-framework + ${curator.version} + + org.codehaus.groovy groovy-all ${groovy.version} @@ -838,7 +843,7 @@ ${test.warehouse.scheme}${test.warehouse.dir} true - src,src1,srcbucket,srcbucket2,src_json,src_thrift,src_sequencefile,srcpart,alltypesorc,src_hbase + src,src1,srcbucket,srcbucket2,src_json,src_thrift,src_sequencefile,srcpart,alltypesorc,src_hbase,cbo_t1,cbo_t2,cbo_t3,src_cbo,part,lineitem ${test.tmp.dir}/conf/krb5.conf Index: ql/pom.xml =================================================================== --- ql/pom.xml (revision 1637277) +++ ql/pom.xml (working copy) @@ -274,6 +274,16 @@ test + jline + jline + ${jline.version} + + + org.fusesource.jansi + jansi + ${jansi.version} + + org.apache.tez tez-api ${tez.version} Index: ql/src/gen/vectorization/ExpressionTemplates/ColumnArithmeticColumnDecimal.txt =================================================================== --- ql/src/gen/vectorization/ExpressionTemplates/ColumnArithmeticColumnDecimal.txt (revision 1637277) +++ ql/src/gen/vectorization/ExpressionTemplates/ColumnArithmeticColumnDecimal.txt (working copy) @@ -24,7 +24,7 @@ import 
org.apache.hadoop.hive.ql.exec.vector.expressions.NullUtil; import org.apache.hadoop.hive.ql.exec.vector.expressions.DecimalUtil; import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor; -import org.apache.hadoop.hive.common.type.Decimal128; +import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable; /** * Generated from template ColumnArithmeticColumnDecimal.txt, which covers binary arithmetic @@ -61,8 +61,8 @@ DecimalColumnVector outputColVector = (DecimalColumnVector) batch.cols[outputColumn]; int[] sel = batch.selected; int n = batch.size; - Decimal128[] vector1 = inputColVector1.vector; - Decimal128[] vector2 = inputColVector2.vector; + HiveDecimalWritable[] vector1 = inputColVector1.vector; + HiveDecimalWritable[] vector2 = inputColVector2.vector; // return immediately if batch is empty if (n == 0) { Index: ql/src/gen/vectorization/ExpressionTemplates/ColumnArithmeticScalarDecimal.txt =================================================================== --- ql/src/gen/vectorization/ExpressionTemplates/ColumnArithmeticScalarDecimal.txt (revision 1637277) +++ ql/src/gen/vectorization/ExpressionTemplates/ColumnArithmeticScalarDecimal.txt (working copy) @@ -24,7 +24,8 @@ import org.apache.hadoop.hive.ql.exec.vector.expressions.NullUtil; import org.apache.hadoop.hive.ql.exec.vector.expressions.DecimalUtil; import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor; -import org.apache.hadoop.hive.common.type.Decimal128; +import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable; +import org.apache.hadoop.hive.common.type.HiveDecimal; /** * Generated from template ColumnArithmeticScalarDecimal.txt, which covers binary arithmetic @@ -35,10 +36,10 @@ private static final long serialVersionUID = 1L; private int colNum; - private Decimal128 value; + private HiveDecimal value; private int outputColumn; - public (int colNum, Decimal128 value, int outputColumn) { + public (int colNum, HiveDecimal value, int outputColumn) { this.colNum = colNum; this.value = value; this.outputColumn = outputColumn; @@ -64,7 +65,7 @@ outputColVector.noNulls = inputColVector.noNulls; outputColVector.isRepeating = inputColVector.isRepeating; int n = batch.size; - Decimal128[] vector = inputColVector.vector; + HiveDecimalWritable[] vector = inputColVector.vector; // return immediately if batch is empty if (n == 0) { @@ -129,27 +130,7 @@ public int getOutputColumn() { return outputColumn; } - - public int getColNum() { - return colNum; - } - - public void setColNum(int colNum) { - this.colNum = colNum; - } - public Decimal128 getValue() { - return value; - } - - public void setValue(Decimal128 value) { - this.value = value; - } - - public void setOutputColumn(int outputColumn) { - this.outputColumn = outputColumn; - } - @Override public VectorExpressionDescriptor.Descriptor getDescriptor() { return (new VectorExpressionDescriptor.Builder()) Index: ql/src/gen/vectorization/ExpressionTemplates/ColumnDivideColumnDecimal.txt =================================================================== --- ql/src/gen/vectorization/ExpressionTemplates/ColumnDivideColumnDecimal.txt (revision 1637277) +++ ql/src/gen/vectorization/ExpressionTemplates/ColumnDivideColumnDecimal.txt (working copy) @@ -24,7 +24,7 @@ import org.apache.hadoop.hive.ql.exec.vector.expressions.NullUtil; import org.apache.hadoop.hive.ql.exec.vector.expressions.DecimalUtil; import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor; -import org.apache.hadoop.hive.common.type.Decimal128; +import 
org.apache.hadoop.hive.serde2.io.HiveDecimalWritable; /** * Generated from template ColumnArithmeticColumnDecimal.txt, which covers binary arithmetic @@ -61,8 +61,8 @@ DecimalColumnVector outputColVector = (DecimalColumnVector) batch.cols[outputColumn]; int[] sel = batch.selected; int n = batch.size; - Decimal128[] vector1 = inputColVector1.vector; - Decimal128[] vector2 = inputColVector2.vector; + HiveDecimalWritable[] vector1 = inputColVector1.vector; + HiveDecimalWritable[] vector2 = inputColVector2.vector; // return immediately if batch is empty if (n == 0) { @@ -138,26 +138,6 @@ return outputColumn; } - public int getColNum1() { - return colNum1; - } - - public void setColNum1(int colNum1) { - this.colNum1 = colNum1; - } - - public int getColNum2() { - return colNum2; - } - - public void setColNum2(int colNum2) { - this.colNum2 = colNum2; - } - - public void setOutputColumn(int outputColumn) { - this.outputColumn = outputColumn; - } - @Override public VectorExpressionDescriptor.Descriptor getDescriptor() { return (new VectorExpressionDescriptor.Builder()) Index: ql/src/gen/vectorization/ExpressionTemplates/ColumnDivideScalarDecimal.txt =================================================================== --- ql/src/gen/vectorization/ExpressionTemplates/ColumnDivideScalarDecimal.txt (revision 1637277) +++ ql/src/gen/vectorization/ExpressionTemplates/ColumnDivideScalarDecimal.txt (working copy) @@ -24,7 +24,8 @@ import org.apache.hadoop.hive.ql.exec.vector.expressions.NullUtil; import org.apache.hadoop.hive.ql.exec.vector.expressions.DecimalUtil; import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor; -import org.apache.hadoop.hive.common.type.Decimal128; +import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable; +import org.apache.hadoop.hive.common.type.HiveDecimal; /** * Generated from template ColumnDivideScalarDecimal.txt, which covers binary arithmetic @@ -35,11 +36,10 @@ private static final long serialVersionUID = 1L; private int colNum; - private Decimal128 value; + private HiveDecimal value; private int outputColumn; - private transient Decimal128 zero; // to hold constant 0 for later use - public (int colNum, Decimal128 value, int outputColumn) { + public (int colNum, HiveDecimal value, int outputColumn) { this.colNum = colNum; this.value = value; this.outputColumn = outputColumn; @@ -65,14 +65,9 @@ outputColVector.noNulls = inputColVector.noNulls; outputColVector.isRepeating = inputColVector.isRepeating; int n = batch.size; - Decimal128[] vector = inputColVector.vector; - Decimal128[] outputVector = outputColVector.vector; + HiveDecimalWritable[] vector = inputColVector.vector; + HiveDecimalWritable[] outputVector = outputColVector.vector; - // Initialize local variable to use as 0 value on first use. 
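The ColumnDivideScalarDecimal hunk above drops the lazily built zero constant in favor of comparing against HiveDecimal.ZERO. As a rough, self-contained illustration of the zero-denominator convention these templates follow (BigDecimal stands in for Hive's decimal type; all names are invented):

  import java.math.BigDecimal;
  import java.math.MathContext;
  import java.util.Arrays;

  class DivideScalarSketch {
    // If the scalar denominator is zero, the whole output column becomes
    // null in one step; otherwise divide row by row.
    static void divide(BigDecimal[] in, BigDecimal scalar,
                       BigDecimal[] out, boolean[] outIsNull) {
      if (scalar.compareTo(BigDecimal.ZERO) == 0) {
        Arrays.fill(outIsNull, true);
        return;
      }
      for (int i = 0; i < in.length; i++) {
        out[i] = in[i].divide(scalar, MathContext.DECIMAL128);
        outIsNull[i] = false;
      }
    }
  }

Comparing against a shared zero constant also avoids allocating a per-operator sentinel just to test the denominator, which is all the removed transient field was for.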
- if (zero == null) { - this.zero = new Decimal128(0, inputColVector.scale); - } - // return immediately if batch is empty if (n == 0) { return; @@ -90,7 +85,7 @@ } - if (value.compareTo(zero) == 0) { + if (value.compareTo(HiveDecimal.ZERO) == 0) { // Denominator is zero, convert the batch to nulls outputColVector.noNulls = false; @@ -142,26 +137,6 @@ return outputColumn; } - public int getColNum() { - return colNum; - } - - public void setColNum(int colNum) { - this.colNum = colNum; - } - - public Decimal128 getValue() { - return value; - } - - public void setValue(Decimal128 value) { - this.value = value; - } - - public void setOutputColumn(int outputColumn) { - this.outputColumn = outputColumn; - } - @Override public VectorExpressionDescriptor.Descriptor getDescriptor() { return (new VectorExpressionDescriptor.Builder()) Index: ql/src/gen/vectorization/ExpressionTemplates/DecimalColumnUnaryFunc.txt =================================================================== --- ql/src/gen/vectorization/ExpressionTemplates/DecimalColumnUnaryFunc.txt (revision 1637277) +++ ql/src/gen/vectorization/ExpressionTemplates/DecimalColumnUnaryFunc.txt (working copy) @@ -23,7 +23,7 @@ import org.apache.hadoop.hive.ql.exec.vector.*; import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch; import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor; -import org.apache.hadoop.hive.common.type.Decimal128; +import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable; import org.apache.hadoop.hive.ql.exec.vector.DecimalColumnVector; import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector; import org.apache.hadoop.hive.ql.exec.vector.expressions.DecimalUtil; @@ -59,7 +59,7 @@ boolean[] outputIsNull = outputColVector.isNull; outputColVector.noNulls = inputColVector.noNulls; int n = batch.size; - Decimal128[] vector = inputColVector.vector; + HiveDecimalWritable[] vector = inputColVector.vector; // return immediately if batch is empty if (n == 0) { @@ -117,19 +117,7 @@ public String getOutputType() { return outputType; } - - public int getColNum() { - return colNum; - } - public void setColNum(int colNum) { - this.colNum = colNum; - } - - public void setOutputColumn(int outputColumn) { - this.outputColumn = outputColumn; - } - @Override public VectorExpressionDescriptor.Descriptor getDescriptor() { return (new VectorExpressionDescriptor.Builder()) Index: ql/src/gen/vectorization/ExpressionTemplates/FilterDecimalColumnBetween.txt =================================================================== --- ql/src/gen/vectorization/ExpressionTemplates/FilterDecimalColumnBetween.txt (revision 1637277) +++ ql/src/gen/vectorization/ExpressionTemplates/FilterDecimalColumnBetween.txt (working copy) @@ -18,8 +18,10 @@ package org.apache.hadoop.hive.ql.exec.vector.expressions.gen; -import org.apache.hadoop.hive.common.type.Decimal128; +import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable; +import org.apache.hadoop.hive.common.type.HiveDecimal; import org.apache.hadoop.hive.ql.exec.vector.DecimalColumnVector; +import org.apache.hadoop.hive.ql.exec.vector.expressions.DecimalUtil; import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpression; import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch; import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor; @@ -37,10 +39,10 @@ private int colNum; // The comparison is of the form "column BETWEEN leftValue AND rightValue" - private Decimal128 leftValue; - private Decimal128 rightValue; + private HiveDecimal leftValue; + 
private HiveDecimal rightValue; - public (int colNum, Decimal128 leftValue, Decimal128 rightValue) { + public (int colNum, HiveDecimal leftValue, HiveDecimal rightValue) { this.colNum = colNum; this.leftValue = leftValue; this.rightValue = rightValue; @@ -60,7 +62,7 @@ int[] sel = batch.selected; boolean[] nullPos = inputColVector.isNull; int n = batch.size; - Decimal128[] vector = inputColVector.vector; + HiveDecimalWritable[] vector = inputColVector.vector; // return immediately if batch is empty if (n == 0) { @@ -72,7 +74,7 @@ // All must be selected otherwise size would be zero. // Repeating property will not change. - if ((vector[0].compareTo(leftValue) < 0 || vector[0].compareTo(rightValue) > 0)) { + if ((DecimalUtil.compare(vector[0], leftValue) < 0 || DecimalUtil.compare(vector[0], rightValue) > 0)) { // Entire batch is filtered out. batch.size = 0; @@ -81,7 +83,7 @@ int newSize = 0; for(int j = 0; j != n; j++) { int i = sel[j]; - if ((leftValue.compareTo(vector[i]) <= 0 && vector[i].compareTo(rightValue) <= 0)) { + if ((DecimalUtil.compare(leftValue, vector[i]) <= 0 && DecimalUtil.compare(vector[i], rightValue) <= 0)) { sel[newSize++] = i; } } @@ -89,7 +91,7 @@ } else { int newSize = 0; for(int i = 0; i != n; i++) { - if ((leftValue.compareTo(vector[i]) <= 0 && vector[i].compareTo(rightValue) <= 0)) { + if ((DecimalUtil.compare(leftValue, vector[i]) <= 0 && DecimalUtil.compare(vector[i], rightValue) <= 0)) { sel[newSize++] = i; } } @@ -104,7 +106,7 @@ // All must be selected otherwise size would be zero. // Repeating property will not change. if (!nullPos[0]) { - if ((vector[0].compareTo(leftValue) < 0 || vector[0].compareTo(rightValue) > 0)) { + if ((DecimalUtil.compare(vector[0], leftValue) < 0 || DecimalUtil.compare(vector[0], rightValue) > 0)) { // Entire batch is filtered out. 
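Throughout these filter templates the same selection-vector idiom recurs; here is a compact sketch of it, using long values so it runs stand-alone (the real templates compare HiveDecimalWritable values via DecimalUtil.compare):

  class BetweenFilterSketch {
    // Compacts qualifying row ids into sel[] in place and returns the new
    // size, which the caller assigns to batch.size.
    static int filterBetween(long[] vector, int[] sel, int n,
                             long left, long right, boolean selectedInUse) {
      int newSize = 0;
      if (selectedInUse) {
        for (int j = 0; j != n; j++) {
          int i = sel[j];
          if (left <= vector[i] && vector[i] <= right) {
            sel[newSize++] = i;
          }
        }
      } else {
        for (int i = 0; i != n; i++) {
          if (left <= vector[i] && vector[i] <= right) {
            sel[newSize++] = i;
          }
        }
      }
      return newSize;
    }
  }

Usage: after the call, batch.size is set to the returned count and, in the unselected case, the batch flips to selected-in-use mode, exactly as the surrounding hunks do.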
batch.size = 0; @@ -117,7 +119,7 @@ for(int j = 0; j != n; j++) { int i = sel[j]; if (!nullPos[i]) { - if ((leftValue.compareTo(vector[i]) <= 0 && vector[i].compareTo(rightValue) <= 0)) { + if ((DecimalUtil.compare(leftValue, vector[i]) <= 0 && DecimalUtil.compare(vector[i], rightValue) <= 0)) { sel[newSize++] = i; } } @@ -129,7 +131,7 @@ int newSize = 0; for(int i = 0; i != n; i++) { if (!nullPos[i]) { - if ((leftValue.compareTo(vector[i]) <= 0 && vector[i].compareTo(rightValue) <= 0)) { + if ((DecimalUtil.compare(leftValue, vector[i]) <= 0 && DecimalUtil.compare(vector[i], rightValue) <= 0)) { sel[newSize++] = i; } } @@ -152,30 +154,6 @@ return "boolean"; } - public int getColNum() { - return colNum; - } - - public void setColNum(int colNum) { - this.colNum = colNum; - } - - public Decimal128 getLeftValue() { - return leftValue; - } - - public void setLeftValue(Decimal128 value) { - this.leftValue = value; - } - - public Decimal128 getRightValue() { - return rightValue; - } - - public void setRightValue(Decimal128 value) { - this.leftValue = value; - } - @Override public VectorExpressionDescriptor.Descriptor getDescriptor() { return (new VectorExpressionDescriptor.Builder()) Index: ql/src/gen/vectorization/ExpressionTemplates/FilterDecimalColumnCompareColumn.txt =================================================================== --- ql/src/gen/vectorization/ExpressionTemplates/FilterDecimalColumnCompareColumn.txt (revision 1637277) +++ ql/src/gen/vectorization/ExpressionTemplates/FilterDecimalColumnCompareColumn.txt (working copy) @@ -22,7 +22,7 @@ import org.apache.hadoop.hive.ql.exec.vector.DecimalColumnVector; import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch; import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor; -import org.apache.hadoop.hive.common.type.Decimal128; +import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable; /** * Generated from template FilterDecimalColumnCompareColumn.txt, which covers binary comparison @@ -57,8 +57,8 @@ boolean[] nullPos1 = inputColVector1.isNull; boolean[] nullPos2 = inputColVector2.isNull; int n = batch.size; - Decimal128[] vector1 = inputColVector1.vector; - Decimal128[] vector2 = inputColVector2.vector; + HiveDecimalWritable[] vector1 = inputColVector1.vector; + HiveDecimalWritable[] vector2 = inputColVector2.vector; // return immediately if batch is empty if (n == 0) { @@ -428,23 +428,7 @@ public int getOutputColumn() { return -1; } - - public int getColNum1() { - return colNum1; - } - public void setColNum1(int colNum1) { - this.colNum1 = colNum1; - } - - public int getColNum2() { - return colNum2; - } - - public void setColNum2(int colNum2) { - this.colNum2 = colNum2; - } - @Override public VectorExpressionDescriptor.Descriptor getDescriptor() { return (new VectorExpressionDescriptor.Builder()) Index: ql/src/gen/vectorization/ExpressionTemplates/FilterDecimalColumnCompareScalar.txt =================================================================== --- ql/src/gen/vectorization/ExpressionTemplates/FilterDecimalColumnCompareScalar.txt (revision 1637277) +++ ql/src/gen/vectorization/ExpressionTemplates/FilterDecimalColumnCompareScalar.txt (working copy) @@ -20,9 +20,11 @@ import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpression; import org.apache.hadoop.hive.ql.exec.vector.DecimalColumnVector; +import org.apache.hadoop.hive.ql.exec.vector.expressions.DecimalUtil; import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch; import 
org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor; -import org.apache.hadoop.hive.common.type.Decimal128; +import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable; +import org.apache.hadoop.hive.common.type.HiveDecimal; /** * This is a generated class to evaluate a comparison on a vector of decimal @@ -33,9 +35,9 @@ private static final long serialVersionUID = 1L; private int colNum; - private Decimal128 value; + private HiveDecimal value; - public <ClassName>(int colNum, Decimal128 value) { + public <ClassName>(int colNum, HiveDecimal value) { this.colNum = colNum; this.value = value; } @@ -52,7 +54,7 @@ int[] sel = batch.selected; boolean[] nullPos = inputColVector.isNull; int n = batch.size; - Decimal128[] vector = inputColVector.vector; + HiveDecimalWritable[] vector = inputColVector.vector; // return immediately if batch is empty if (n == 0) { @@ -63,7 +65,7 @@ if (inputColVector.isRepeating) { // All must be selected otherwise size would be zero. Repeating property will not change. - if (!(vector[0].compareTo(value) <OperatorSymbol> 0)) { + if (!(DecimalUtil.compare(vector[0], value) <OperatorSymbol> 0)) { // Entire batch is filtered out. batch.size = 0; @@ -72,7 +74,7 @@ int newSize = 0; for(int j = 0; j != n; j++) { int i = sel[j]; - if (vector[i].compareTo(value) <OperatorSymbol> 0) { + if (DecimalUtil.compare(vector[i], value) <OperatorSymbol> 0) { sel[newSize++] = i; } } @@ -80,7 +82,7 @@ } else { int newSize = 0; for(int i = 0; i != n; i++) { - if (vector[i].compareTo(value) <OperatorSymbol> 0) { + if (DecimalUtil.compare(vector[i], value) <OperatorSymbol> 0) { sel[newSize++] = i; } } @@ -94,7 +96,7 @@ // All must be selected otherwise size would be zero. Repeating property will not change. if (!nullPos[0]) { - if (!(vector[0].compareTo(value) <OperatorSymbol> 0)) { + if (!(DecimalUtil.compare(vector[0], value) <OperatorSymbol> 0)) { // Entire batch is filtered out. batch.size = 0; @@ -107,7 +109,7 @@ for(int j = 0; j != n; j++) { int i = sel[j]; if (!nullPos[i]) { - if (vector[i].compareTo(value) <OperatorSymbol> 0) { + if (DecimalUtil.compare(vector[i], value) <OperatorSymbol> 0) { sel[newSize++] = i; } } @@ -119,7 +121,7 @@ int newSize = 0; for(int i = 0; i != n; i++) { if (!nullPos[i]) { - if (vector[i].compareTo(value) <OperatorSymbol> 0) { + if (DecimalUtil.compare(vector[i], value) <OperatorSymbol> 0) { sel[newSize++] = i; } } @@ -142,22 +144,6 @@ return "boolean"; } - public int getColNum() { - return colNum; - } - - public void setColNum(int colNum) { - this.colNum = colNum; - } - - public Decimal128 getValue() { - return value; - } - - public void setValue(Decimal128 value) { - this.value = value; - } - @Override public VectorExpressionDescriptor.Descriptor getDescriptor() { return (new VectorExpressionDescriptor.Builder()) Index: ql/src/gen/vectorization/ExpressionTemplates/FilterDecimalScalarCompareColumn.txt =================================================================== --- ql/src/gen/vectorization/ExpressionTemplates/FilterDecimalScalarCompareColumn.txt (revision 1637277) +++ ql/src/gen/vectorization/ExpressionTemplates/FilterDecimalScalarCompareColumn.txt (working copy) @@ -20,9 +20,11 @@ import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpression; import org.apache.hadoop.hive.ql.exec.vector.DecimalColumnVector; +import org.apache.hadoop.hive.ql.exec.vector.expressions.DecimalUtil; import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch; import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor; -import org.apache.hadoop.hive.common.type.Decimal128; +import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable; +import org.apache.hadoop.hive.common.type.HiveDecimal; /** * This is a generated class to evaluate a comparison on
a vector of decimal @@ -33,9 +35,9 @@ private static final long serialVersionUID = 1L; private int colNum; - private Decimal128 value; + private HiveDecimal value; - public <ClassName>(Decimal128 value, int colNum) { + public <ClassName>(HiveDecimal value, int colNum) { this.colNum = colNum; this.value = value; } @@ -52,7 +54,7 @@ int[] sel = batch.selected; boolean[] nullPos = inputColVector.isNull; int n = batch.size; - Decimal128[] vector = inputColVector.vector; + HiveDecimalWritable[] vector = inputColVector.vector; // return immediately if batch is empty if (n == 0) { @@ -63,7 +65,7 @@ if (inputColVector.isRepeating) { // All must be selected otherwise size would be zero. Repeating property will not change. - if (!(value.compareTo(vector[0]) <OperatorSymbol> 0)) { + if (!(DecimalUtil.compare(value, vector[0]) <OperatorSymbol> 0)) { // Entire batch is filtered out. batch.size = 0; @@ -72,7 +74,7 @@ int newSize = 0; for(int j = 0; j != n; j++) { int i = sel[j]; - if (value.compareTo(vector[i]) <OperatorSymbol> 0) { + if (DecimalUtil.compare(value, vector[i]) <OperatorSymbol> 0) { sel[newSize++] = i; } } @@ -80,7 +82,7 @@ } else { int newSize = 0; for(int i = 0; i != n; i++) { - if (value.compareTo(vector[i]) <OperatorSymbol> 0) { + if (DecimalUtil.compare(value, vector[i]) <OperatorSymbol> 0) { sel[newSize++] = i; } } @@ -94,7 +96,7 @@ // All must be selected otherwise size would be zero. Repeating property will not change. if (!nullPos[0]) { - if (!(value.compareTo(vector[0]) <OperatorSymbol> 0)) { + if (!(DecimalUtil.compare(value, vector[0]) <OperatorSymbol> 0)) { // Entire batch is filtered out. batch.size = 0; @@ -107,7 +109,7 @@ for(int j = 0; j != n; j++) { int i = sel[j]; if (!nullPos[i]) { - if (value.compareTo(vector[i]) <OperatorSymbol> 0) { + if (DecimalUtil.compare(value, vector[i]) <OperatorSymbol> 0) { sel[newSize++] = i; } } @@ -119,7 +121,7 @@ int newSize = 0; for(int i = 0; i != n; i++) { if (!nullPos[i]) { - if (value.compareTo(vector[i]) <OperatorSymbol> 0) { + if (DecimalUtil.compare(value, vector[i]) <OperatorSymbol> 0) { sel[newSize++] = i; } } @@ -142,22 +144,6 @@ return "boolean"; } - public int getColNum() { - return colNum; - } - - public void setColNum(int colNum) { - this.colNum = colNum; - } - - public Decimal128 getValue() { - return value; - } - - public void setValue(Decimal128 value) { - this.value = value; - } - @Override public VectorExpressionDescriptor.Descriptor getDescriptor() { return (new VectorExpressionDescriptor.Builder()) Index: ql/src/gen/vectorization/ExpressionTemplates/ScalarArithmeticColumnDecimal.txt =================================================================== --- ql/src/gen/vectorization/ExpressionTemplates/ScalarArithmeticColumnDecimal.txt (revision 1637277) +++ ql/src/gen/vectorization/ExpressionTemplates/ScalarArithmeticColumnDecimal.txt (working copy) @@ -24,7 +24,8 @@ import org.apache.hadoop.hive.ql.exec.vector.expressions.NullUtil; import org.apache.hadoop.hive.ql.exec.vector.expressions.DecimalUtil; import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor; -import org.apache.hadoop.hive.common.type.Decimal128; +import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable; +import org.apache.hadoop.hive.common.type.HiveDecimal; /** * Generated from template ScalarArithmeticColumnDecimal.txt, which covers binary arithmetic @@ -35,10 +36,10 @@ private static final long serialVersionUID = 1L; private int colNum; - private Decimal128 value; + private HiveDecimal value; private int outputColumn; - public <ClassName>(Decimal128 value, int colNum, int outputColumn) { + public <ClassName>(HiveDecimal value, int colNum, int outputColumn) { this.colNum = colNum; this.value = value; this.outputColumn = outputColumn; @@ -64,7 +65,7 @@ outputColVector.noNulls =
inputColVector.noNulls; outputColVector.isRepeating = inputColVector.isRepeating; int n = batch.size; - Decimal128[] vector = inputColVector.vector; + HiveDecimalWritable[] vector = inputColVector.vector; // return immediately if batch is empty if (n == 0) { @@ -126,27 +127,7 @@ public int getOutputColumn() { return outputColumn; } - - public int getColNum() { - return colNum; - } - - public void setColNum(int colNum) { - this.colNum = colNum; - } - public Decimal128 getValue() { - return value; - } - - public void setValue(Decimal128 value) { - this.value = value; - } - - public void setOutputColumn(int outputColumn) { - this.outputColumn = outputColumn; - } - @Override public VectorExpressionDescriptor.Descriptor getDescriptor() { return (new VectorExpressionDescriptor.Builder()) Index: ql/src/gen/vectorization/ExpressionTemplates/ScalarDivideColumnDecimal.txt =================================================================== --- ql/src/gen/vectorization/ExpressionTemplates/ScalarDivideColumnDecimal.txt (revision 1637277) +++ ql/src/gen/vectorization/ExpressionTemplates/ScalarDivideColumnDecimal.txt (working copy) @@ -24,7 +24,8 @@ import org.apache.hadoop.hive.ql.exec.vector.expressions.NullUtil; import org.apache.hadoop.hive.ql.exec.vector.expressions.DecimalUtil; import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor; -import org.apache.hadoop.hive.common.type.Decimal128; +import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable; +import org.apache.hadoop.hive.common.type.HiveDecimal; /** * Generated from template ScalarDivideColumnDecimal.txt, which covers binary arithmetic @@ -35,10 +36,10 @@ private static final long serialVersionUID = 1L; private int colNum; - private Decimal128 value; + private HiveDecimal value; private int outputColumn; - public (Decimal128 value, int colNum, int outputColumn) { + public (HiveDecimal value, int colNum, int outputColumn) { this.colNum = colNum; this.value = value; this.outputColumn = outputColumn; @@ -64,8 +65,8 @@ outputColVector.noNulls = inputColVector.noNulls; outputColVector.isRepeating = inputColVector.isRepeating; int n = batch.size; - Decimal128[] vector = inputColVector.vector; - Decimal128[] outputVector = outputColVector.vector; + HiveDecimalWritable[] vector = inputColVector.vector; + HiveDecimalWritable[] outputVector = outputColVector.vector; // return immediately if batch is empty if (n == 0) { @@ -129,26 +130,6 @@ return outputColumn; } - public int getColNum() { - return colNum; - } - - public void setColNum(int colNum) { - this.colNum = colNum; - } - - public Decimal128 getValue() { - return value; - } - - public void setValue(Decimal128 value) { - this.value = value; - } - - public void setOutputColumn(int outputColumn) { - this.outputColumn = outputColumn; - } - @Override public VectorExpressionDescriptor.Descriptor getDescriptor() { return (new VectorExpressionDescriptor.Builder()) Index: ql/src/gen/vectorization/UDAFTemplates/VectorUDAFMinMaxDecimal.txt =================================================================== --- ql/src/gen/vectorization/UDAFTemplates/VectorUDAFMinMaxDecimal.txt (revision 1637277) +++ ql/src/gen/vectorization/UDAFTemplates/VectorUDAFMinMaxDecimal.txt (working copy) @@ -18,7 +18,6 @@ package org.apache.hadoop.hive.ql.exec.vector.expressions.aggregates.gen; -import org.apache.hadoop.hive.common.type.Decimal128; import org.apache.hadoop.hive.common.type.HiveDecimal; import org.apache.hadoop.hive.ql.exec.Description; import 
org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpression; @@ -31,6 +30,7 @@ import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.plan.AggregationDesc; import org.apache.hadoop.hive.ql.util.JavaDataModel; +import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; /** @@ -49,7 +49,7 @@ private static final long serialVersionUID = 1L; - transient private final Decimal128 value; + transient private final HiveDecimalWritable value; /** * Value is explicitly (re)initialized in reset() @@ -57,15 +57,16 @@ transient private boolean isNull = true; public Aggregation() { - value = new Decimal128(); + value = new HiveDecimalWritable(); } - public void checkValue(Decimal128 value, short scale) { + public void checkValue(HiveDecimalWritable writable, short scale) { + HiveDecimal value = writable.getHiveDecimal(); if (isNull) { isNull = false; - this.value.update(value); - } else if (this.value.compareTo(value) 0) { - this.value.update(value, scale); + this.value.set(value); + } else if (this.value.getHiveDecimal().compareTo(value) 0) { + this.value.set(value); } } @@ -77,7 +78,7 @@ @Override public void reset () { isNull = true; - value.zeroClear(); + value.set(HiveDecimal.ZERO); } } @@ -124,7 +125,7 @@ DecimalColumnVector inputVector = (DecimalColumnVector)batch. cols[this.inputExpression.getOutputColumn()]; - Decimal128[] vector = inputVector.vector; + HiveDecimalWritable[] vector = inputVector.vector; if (inputVector.noNulls) { if (inputVector.isRepeating) { @@ -170,7 +171,7 @@ private void iterateNoNullsRepeatingWithAggregationSelection( VectorAggregationBufferRow[] aggregationBufferSets, int aggregrateIndex, - Decimal128 value, + HiveDecimalWritable value, short scale, int batchSize) { @@ -186,7 +187,7 @@ private void iterateNoNullsSelectionWithAggregationSelection( VectorAggregationBufferRow[] aggregationBufferSets, int aggregrateIndex, - Decimal128[] values, + HiveDecimalWritable[] values, short scale, int[] selection, int batchSize) { @@ -203,7 +204,7 @@ private void iterateNoNullsWithAggregationSelection( VectorAggregationBufferRow[] aggregationBufferSets, int aggregrateIndex, - Decimal128[] values, + HiveDecimalWritable[] values, short scale, int batchSize) { for (int i=0; i < batchSize; ++i) { @@ -218,7 +219,7 @@ private void iterateHasNullsRepeatingSelectionWithAggregationSelection( VectorAggregationBufferRow[] aggregationBufferSets, int aggregrateIndex, - Decimal128 value, + HiveDecimalWritable value, short scale, int batchSize, int[] selection, @@ -239,7 +240,7 @@ private void iterateHasNullsRepeatingWithAggregationSelection( VectorAggregationBufferRow[] aggregationBufferSets, int aggregrateIndex, - Decimal128 value, + HiveDecimalWritable value, short scale, int batchSize, boolean[] isNull) { @@ -258,7 +259,7 @@ private void iterateHasNullsSelectionWithAggregationSelection( VectorAggregationBufferRow[] aggregationBufferSets, int aggregrateIndex, - Decimal128[] values, + HiveDecimalWritable[] values, short scale, int batchSize, int[] selection, @@ -279,7 +280,7 @@ private void iterateHasNullsWithAggregationSelection( VectorAggregationBufferRow[] aggregationBufferSets, int aggregrateIndex, - Decimal128[] values, + HiveDecimalWritable[] values, short scale, int batchSize, boolean[] isNull) { @@ -312,13 +313,14 @@ Aggregation myagg = (Aggregation)agg; - Decimal128[] vector = inputVector.vector; + HiveDecimalWritable[] vector = inputVector.vector; if (inputVector.isRepeating) 
{ if (inputVector.noNulls && (myagg.isNull || (myagg.value.compareTo(vector[0]) 0))) { myagg.isNull = false; - myagg.value.update(vector[0], inputVector.scale); + HiveDecimal value = vector[0].getHiveDecimal(); + myagg.value.set(value); } return; } @@ -341,7 +343,7 @@ private void iterateSelectionHasNulls( Aggregation myagg, - Decimal128[] vector, + HiveDecimalWritable[] vector, short scale, int batchSize, boolean[] isNull, @@ -350,13 +352,13 @@ for (int j=0; j< batchSize; ++j) { int i = selected[j]; if (!isNull[i]) { - Decimal128 value = vector[i]; + HiveDecimal value = vector[i].getHiveDecimal(); if (myagg.isNull) { myagg.isNull = false; - myagg.value.update(value); + myagg.value.set(value); } - else if (myagg.value.compareTo(value) 0) { - myagg.value.update(value, scale); + else if (myagg.value.getHiveDecimal().compareTo(value) 0) { + myagg.value.set(value); } } } @@ -364,40 +366,41 @@ private void iterateSelectionNoNulls( Aggregation myagg, - Decimal128[] vector, + HiveDecimalWritable[] vector, short scale, int batchSize, int[] selected) { if (myagg.isNull) { - myagg.value.update(vector[selected[0]]); + HiveDecimal value = vector[selected[0]].getHiveDecimal(); + myagg.value.set(value); myagg.isNull = false; } for (int i=0; i< batchSize; ++i) { - Decimal128 value = vector[selected[i]]; - if (myagg.value.compareTo(value) 0) { - myagg.value.update(value, scale); + HiveDecimal value = vector[selected[i]].getHiveDecimal(); + if (myagg.value.getHiveDecimal().compareTo(value) 0) { + myagg.value.set(value); } } } private void iterateNoSelectionHasNulls( Aggregation myagg, - Decimal128[] vector, + HiveDecimalWritable[] vector, short scale, int batchSize, boolean[] isNull) { for(int i=0;i 0) { - myagg.value.update(value, scale); + else if (myagg.value.getHiveDecimal().compareTo(value) 0) { + myagg.value.set(value); } } } @@ -405,18 +408,19 @@ private void iterateNoSelectionNoNulls( Aggregation myagg, - Decimal128[] vector, + HiveDecimalWritable[] vector, short scale, int batchSize) { if (myagg.isNull) { - myagg.value.update(vector[0]); + HiveDecimal value = vector[0].getHiveDecimal(); + myagg.value.set(value); myagg.isNull = false; } for (int i=0;i 0) { - myagg.value.update(value, scale); + HiveDecimal value = vector[i].getHiveDecimal(); + if (myagg.value.getHiveDecimal().compareTo(value) 0) { + myagg.value.set(value); } } } Index: ql/src/gen/vectorization/UDAFTemplates/VectorUDAFVarDecimal.txt =================================================================== --- ql/src/gen/vectorization/UDAFTemplates/VectorUDAFVarDecimal.txt (revision 1637277) +++ ql/src/gen/vectorization/UDAFTemplates/VectorUDAFVarDecimal.txt (working copy) @@ -21,7 +21,7 @@ import java.util.ArrayList; import java.util.List; -import org.apache.hadoop.hive.common.type.Decimal128; +import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable; import org.apache.hadoop.hive.common.type.HiveDecimal; import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpression; @@ -86,12 +86,12 @@ variance = 0f; } - public void updateValueWithCheckAndInit(Decimal128 value, short scale) { + public void updateValueWithCheckAndInit(HiveDecimalWritable value, short scale) { if (this.isNull) { this.init(); } - double dval = value.doubleValue(); + double dval = value.getHiveDecimal().doubleValue(); this.sum += dval; this.count += 1; if(this.count > 1) { @@ -100,8 +100,8 @@ } } - public void updateValueNoCheck(Decimal128 value, short scale) { - double dval = value.doubleValue(); + public void 
updateValueNoCheck(HiveDecimalWritable value, short scale) { + double dval = value.getHiveDecimal().doubleValue(); this.sum += dval; this.count += 1; double t = this.count*dval - this.sum; @@ -176,7 +176,7 @@ return; } - Decimal128[] vector = inputVector.vector; + HiveDecimalWritable[] vector = inputVector.vector; if (inputVector.isRepeating) { if (inputVector.noNulls || !inputVector.isNull[0]) { @@ -209,7 +209,7 @@ private void iterateRepeatingNoNullsWithAggregationSelection( VectorAggregationBufferRow[] aggregationBufferSets, int aggregateIndex, - Decimal128 value, + HiveDecimalWritable value, short scale, int batchSize) { @@ -225,7 +225,7 @@ private void iterateSelectionHasNullsWithAggregationSelection( VectorAggregationBufferRow[] aggregationBufferSets, int aggregateIndex, - Decimal128[] vector, + HiveDecimalWritable[] vector, short scale, int batchSize, boolean[] isNull, @@ -238,7 +238,7 @@ j); int i = selected[j]; if (!isNull[i]) { - Decimal128 value = vector[i]; + HiveDecimalWritable value = vector[i]; myagg.updateValueWithCheckAndInit(value, scale); } } @@ -247,7 +247,7 @@ private void iterateSelectionNoNullsWithAggregationSelection( VectorAggregationBufferRow[] aggregationBufferSets, int aggregateIndex, - Decimal128[] vector, + HiveDecimalWritable[] vector, short scale, int batchSize, int[] selected) { @@ -257,7 +257,7 @@ aggregationBufferSets, aggregateIndex, i); - Decimal128 value = vector[selected[i]]; + HiveDecimalWritable value = vector[selected[i]]; myagg.updateValueWithCheckAndInit(value, scale); } } @@ -265,7 +265,7 @@ private void iterateNoSelectionHasNullsWithAggregationSelection( VectorAggregationBufferRow[] aggregationBufferSets, int aggregateIndex, - Decimal128[] vector, + HiveDecimalWritable[] vector, short scale, int batchSize, boolean[] isNull) { @@ -276,7 +276,7 @@ aggregationBufferSets, aggregateIndex, i); - Decimal128 value = vector[i]; + HiveDecimalWritable value = vector[i]; myagg.updateValueWithCheckAndInit(value, scale); } } @@ -285,7 +285,7 @@ private void iterateNoSelectionNoNullsWithAggregationSelection( VectorAggregationBufferRow[] aggregationBufferSets, int aggregateIndex, - Decimal128[] vector, + HiveDecimalWritable[] vector, short scale, int batchSize) { @@ -294,7 +294,7 @@ aggregationBufferSets, aggregateIndex, i); - Decimal128 value = vector[i]; + HiveDecimalWritable value = vector[i]; myagg.updateValueWithCheckAndInit(value, scale); } } @@ -316,7 +316,7 @@ Aggregation myagg = (Aggregation)agg; - Decimal128[] vector = inputVector.vector; + HiveDecimalWritable[] vector = inputVector.vector; if (inputVector.isRepeating) { if (inputVector.noNulls) { @@ -340,7 +340,7 @@ private void iterateRepeatingNoNulls( Aggregation myagg, - Decimal128 value, + HiveDecimalWritable value, short scale, int batchSize) { @@ -357,7 +357,7 @@ private void iterateSelectionHasNulls( Aggregation myagg, - Decimal128[] vector, + HiveDecimalWritable[] vector, short scale, int batchSize, boolean[] isNull, @@ -366,7 +366,7 @@ for (int j=0; j< batchSize; ++j) { int i = selected[j]; if (!isNull[i]) { - Decimal128 value = vector[i]; + HiveDecimalWritable value = vector[i]; myagg.updateValueWithCheckAndInit(value, scale); } } @@ -374,7 +374,7 @@ private void iterateSelectionNoNulls( Aggregation myagg, - Decimal128[] vector, + HiveDecimalWritable[] vector, short scale, int batchSize, int[] selected) { @@ -383,7 +383,7 @@ myagg.init (); } - Decimal128 value = vector[selected[0]]; + HiveDecimalWritable value = vector[selected[0]]; myagg.updateValueWithCheckAndInit(value, scale); // i=0 
was pulled out to remove the count > 1 check in the loop @@ -396,14 +396,14 @@ private void iterateNoSelectionHasNulls( Aggregation myagg, - Decimal128[] vector, + HiveDecimalWritable[] vector, short scale, int batchSize, boolean[] isNull) { for(int i=0;i 1 check Index: ql/src/java/org/apache/hadoop/hive/ql/Driver.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/Driver.java (revision 1637277) +++ ql/src/java/org/apache/hadoop/hive/ql/Driver.java (working copy) @@ -472,7 +472,8 @@ if (conf.getBoolVar(ConfVars.HIVE_LOG_EXPLAIN_OUTPUT)) { String explainOutput = getExplainOutput(sem, plan, tree.dump()); if (explainOutput != null) { - LOG.info("EXPLAIN output: " + explainOutput); + LOG.info("EXPLAIN output for queryid " + queryId + " : " + + explainOutput); } } Index: ql/src/java/org/apache/hadoop/hive/ql/exec/CommonMergeJoinOperator.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/CommonMergeJoinOperator.java (revision 1637277) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/CommonMergeJoinOperator.java (working copy) @@ -134,6 +134,19 @@ sources = ((TezContext) MapredContext.get()).getRecordSources(); } + @Override + public void endGroup() throws HiveException { + // we do not want the end group to cause a checkAndGenObject + defaultEndGroup(); + } + + @Override + public void startGroup() throws HiveException { + // we do not want the start group to clear the storage + defaultStartGroup(); + } + + /* * (non-Javadoc) * @@ -275,7 +288,7 @@ if (foundNextKeyGroup[t]) { // first promote the next group to be the current group if we reached a // new group in the previous fetch - if ((this.nextKeyWritables[t] != null) || (this.fetchDone[t] == false)) { + if (this.nextKeyWritables[t] != null) { promoteNextGroupToCandidate(t); } else { this.keyWritables[t] = null; Index: ql/src/java/org/apache/hadoop/hive/ql/exec/ExplainSQRewriteTask.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/ExplainSQRewriteTask.java (revision 1637277) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/ExplainSQRewriteTask.java (working copy) @@ -192,7 +192,6 @@ return "EXPLAIN REWRITE"; } - @Override public List getResultSchema() { FieldSchema tmpFieldSchema = new FieldSchema(); List colList = new ArrayList(); Index: ql/src/java/org/apache/hadoop/hive/ql/exec/ExplainTask.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/ExplainTask.java (revision 1637277) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/ExplainTask.java (working copy) @@ -950,7 +950,6 @@ return "EXPLAIN"; } - @Override public List getResultSchema() { FieldSchema tmpFieldSchema = new FieldSchema(); List colList = new ArrayList(); Index: ql/src/java/org/apache/hadoop/hive/ql/exec/ExprNodeGenericFuncEvaluator.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/ExprNodeGenericFuncEvaluator.java (revision 1637277) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/ExprNodeGenericFuncEvaluator.java (working copy) @@ -31,6 +31,9 @@ import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils; +import java.util.ArrayList; +import java.util.List; + /** * ExprNodeGenericFuncEvaluator. 
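A note on the arithmetic in the VectorUDAFVarDecimal hunks earlier in this patch: updateValueNoCheck accumulates variance with t = count*dval - sum and variance += t*t / (count*(count-1)). That is the standard one-pass update, shown here as a minimal, runnable sketch (class and method names are invented):

  class StreamingVarianceSketch {
    private double sum = 0d;
    private long count = 0;
    private double m2 = 0d;   // running sum of squared deviations

    void add(double x) {
      sum += x;
      count += 1;
      if (count > 1) {
        double t = count * x - sum;   // equals (count-1) * (x - previousMean)
        m2 += (t * t) / ((double) count * (count - 1));
      }
    }

    double variance() {               // population variance
      return count == 0 ? Double.NaN : m2 / count;
    }
  }

Since t = count*x - sum = (count-1)*(x - previousMean), the added term equals (x - previousMean)^2 * (count-1)/count, the usual Welford-style correction, so no second pass over the batch is needed.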
* @@ -44,6 +47,7 @@ transient Object rowObject; transient ExprNodeEvaluator[] children; transient GenericUDF.DeferredObject[] deferredChildren; + transient GenericUDF.DeferredObject[] childrenNeedingPrepare; transient boolean isEager; transient boolean isConstant = false; @@ -73,6 +77,10 @@ } } + public boolean needsPrepare() { + return !(eval instanceof ExprNodeConstantEvaluator || eval instanceof ExprNodeNullEvaluator); + } + public Object get() throws HiveException { if (!evaluated) { obj = eval.evaluate(rowObject, version); @@ -113,9 +121,17 @@ @Override public ObjectInspector initialize(ObjectInspector rowInspector) throws HiveException { deferredChildren = new GenericUDF.DeferredObject[children.length]; + List childrenNeedingPrepare = + new ArrayList(children.length); for (int i = 0; i < deferredChildren.length; i++) { - deferredChildren[i] = new DeferredExprObject(children[i], isEager); + DeferredExprObject deferredExprObject = new DeferredExprObject(children[i], isEager); + deferredChildren[i] = deferredExprObject; + if (deferredExprObject.needsPrepare()) { + childrenNeedingPrepare.add(deferredExprObject); + } } + this.childrenNeedingPrepare = + childrenNeedingPrepare.toArray(new GenericUDF.DeferredObject[childrenNeedingPrepare.size()]); // Initialize all children first ObjectInspector[] childrenOIs = new ObjectInspector[children.length]; for (int i = 0; i < children.length; i++) { @@ -163,8 +179,8 @@ return ((ConstantObjectInspector) outputOI).getWritableConstantValue(); } rowObject = row; - for (int i = 0; i < deferredChildren.length; i++) { - deferredChildren[i].prepare(version); + for (GenericUDF.DeferredObject deferredObject : childrenNeedingPrepare) { + deferredObject.prepare(version); } return genericUDF.evaluate(deferredChildren); } Index: ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java (revision 1637277) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java (working copy) @@ -170,6 +170,8 @@ public static final String LAG_FUNC_NAME = "lag"; public static final String LAST_VALUE_FUNC_NAME = "last_value"; + public static final String UNARY_PLUS_FUNC_NAME = "positive"; + public static final String UNARY_MINUS_FUNC_NAME = "negative"; public static final String WINDOWING_TABLE_FUNCTION = "windowingtablefunction"; private static final String NOOP_TABLE_FUNCTION = "noop"; @@ -252,8 +254,8 @@ registerGenericUDF("str_to_map", GenericUDFStringToMap.class); registerGenericUDF("translate", GenericUDFTranslate.class); - registerGenericUDF("positive", GenericUDFOPPositive.class); - registerGenericUDF("negative", GenericUDFOPNegative.class); + registerGenericUDF(UNARY_PLUS_FUNC_NAME, GenericUDFOPPositive.class); + registerGenericUDF(UNARY_MINUS_FUNC_NAME, GenericUDFOPNegative.class); registerUDF("day", UDFDayOfMonth.class, false); registerUDF("dayofmonth", UDFDayOfMonth.class, false); Index: ql/src/java/org/apache/hadoop/hive/ql/exec/Operator.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/Operator.java (revision 1637277) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/Operator.java (working copy) @@ -1093,8 +1093,9 @@ @SuppressWarnings("unchecked") T descClone = (T)conf.clone(); + // also clone the colExprMap by default Operator ret = - OperatorFactory.getAndMakeChild(descClone, getSchema(), parentClones); + OperatorFactory.getAndMakeChild(descClone, 
getSchema(), getColumnExprMap(), parentClones); return ret; } Index: ql/src/java/org/apache/hadoop/hive/ql/exec/Task.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/Task.java (revision 1637277) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/Task.java (working copy) @@ -30,7 +30,6 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.ql.DriverContext; import org.apache.hadoop.hive.ql.QueryPlan; import org.apache.hadoop.hive.ql.lib.Node; @@ -518,9 +517,6 @@ public void shutdown() { } - public List getResultSchema() { - return null; - } Throwable getException() { return exception; } Index: ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java (revision 1637277) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java (working copy) @@ -18,11 +18,67 @@ package org.apache.hadoop.hive.ql.exec; -import com.esotericsoftware.kryo.Kryo; -import com.esotericsoftware.kryo.io.Input; -import com.esotericsoftware.kryo.io.Output; -import com.esotericsoftware.kryo.serializers.FieldSerializer; -import com.esotericsoftware.shaded.org.objenesis.strategy.StdInstantiatorStrategy; +import java.beans.DefaultPersistenceDelegate; +import java.beans.Encoder; +import java.beans.ExceptionListener; +import java.beans.Expression; +import java.beans.PersistenceDelegate; +import java.beans.Statement; +import java.beans.XMLDecoder; +import java.beans.XMLEncoder; +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.DataInput; +import java.io.EOFException; +import java.io.File; +import java.io.FileInputStream; +import java.io.FileNotFoundException; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.io.Serializable; +import java.io.UnsupportedEncodingException; +import java.net.URI; +import java.net.URL; +import java.net.URLClassLoader; +import java.security.MessageDigest; +import java.security.NoSuchAlgorithmException; +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.PreparedStatement; +import java.sql.SQLException; +import java.sql.SQLTransientException; +import java.sql.Timestamp; +import java.text.SimpleDateFormat; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Calendar; +import java.util.Collection; +import java.util.Collections; +import java.util.Date; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; +import java.util.LinkedHashMap; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.Properties; +import java.util.Random; +import java.util.Set; +import java.util.UUID; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Future; +import java.util.concurrent.LinkedBlockingQueue; +import java.util.concurrent.ThreadPoolExecutor; +import java.util.concurrent.TimeUnit; +import java.util.regex.Matcher; +import java.util.regex.Pattern; +import java.util.zip.Deflater; +import java.util.zip.DeflaterOutputStream; +import java.util.zip.InflaterInputStream; + import org.antlr.runtime.CommonToken; import org.apache.commons.codec.binary.Base64; import 
org.apache.commons.lang.StringUtils; @@ -127,66 +183,11 @@ import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.util.Shell; -import java.beans.DefaultPersistenceDelegate; -import java.beans.Encoder; -import java.beans.ExceptionListener; -import java.beans.Expression; -import java.beans.PersistenceDelegate; -import java.beans.Statement; -import java.beans.XMLDecoder; -import java.beans.XMLEncoder; -import java.io.ByteArrayInputStream; -import java.io.ByteArrayOutputStream; -import java.io.DataInput; -import java.io.EOFException; -import java.io.File; -import java.io.FileInputStream; -import java.io.FileNotFoundException; -import java.io.IOException; -import java.io.InputStream; -import java.io.OutputStream; -import java.io.Serializable; -import java.io.UnsupportedEncodingException; -import java.net.URI; -import java.net.URL; -import java.net.URLClassLoader; -import java.security.MessageDigest; -import java.security.NoSuchAlgorithmException; -import java.sql.Connection; -import java.sql.DriverManager; -import java.sql.PreparedStatement; -import java.sql.SQLException; -import java.sql.SQLTransientException; -import java.sql.Timestamp; -import java.text.SimpleDateFormat; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Calendar; -import java.util.Collection; -import java.util.Collections; -import java.util.Date; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Iterator; -import java.util.LinkedHashMap; -import java.util.LinkedList; -import java.util.List; -import java.util.Map; -import java.util.Properties; -import java.util.Random; -import java.util.Set; -import java.util.UUID; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.Future; -import java.util.concurrent.LinkedBlockingQueue; -import java.util.concurrent.ThreadPoolExecutor; -import java.util.concurrent.TimeUnit; -import java.util.regex.Matcher; -import java.util.regex.Pattern; -import java.util.zip.Deflater; -import java.util.zip.DeflaterOutputStream; -import java.util.zip.InflaterInputStream; +import com.esotericsoftware.kryo.Kryo; +import com.esotericsoftware.kryo.io.Input; +import com.esotericsoftware.kryo.io.Output; +import com.esotericsoftware.kryo.serializers.FieldSerializer; +import com.esotericsoftware.shaded.org.objenesis.strategy.StdInstantiatorStrategy; /** * Utilities. 
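
The ExprNodeGenericFuncEvaluator hunk above moves the constant/null test out of the per-row path: initialize() now records, once, which deferred children can actually change between rows, and evaluate() prepares only those. A minimal sketch of that caching idea, using hypothetical stand-in types rather than Hive's real evaluator classes:

    import java.util.ArrayList;
    import java.util.List;

    // Hypothetical stand-ins for Hive's DeferredObject machinery -- not the real classes.
    interface Deferred {
      boolean needsPrepare();          // false for constant/null children
      void prepare(Object row);        // re-bind the child to the current row
      Object get();
    }

    class ConstantDeferred implements Deferred {
      private final Object value;
      ConstantDeferred(Object value) { this.value = value; }
      public boolean needsPrepare() { return false; }
      public void prepare(Object row) { /* nothing to re-bind */ }
      public Object get() { return value; }
    }

    class RowFieldDeferred implements Deferred {
      private final int fieldIndex;
      private Object current;
      RowFieldDeferred(int fieldIndex) { this.fieldIndex = fieldIndex; }
      public boolean needsPrepare() { return true; }
      public void prepare(Object row) { current = ((Object[]) row)[fieldIndex]; }
      public Object get() { return current; }
    }

    class LazyEvaluator {
      private final Deferred[] children;
      private final Deferred[] childrenNeedingPrepare;  // computed once at init time

      LazyEvaluator(Deferred[] children) {
        this.children = children;
        List<Deferred> needing = new ArrayList<Deferred>();
        for (Deferred d : children) {
          if (d.needsPrepare()) {
            needing.add(d);
          }
        }
        this.childrenNeedingPrepare = needing.toArray(new Deferred[needing.size()]);
      }

      Object evaluate(Object row) {
        // The per-row loop visits only children that can change between rows.
        for (Deferred d : childrenNeedingPrepare) {
          d.prepare(row);
        }
        // A real UDF would combine all children; returning the first is enough here.
        return children.length > 0 ? children[0].get() : null;
      }

      public static void main(String[] args) {
        LazyEvaluator eval = new LazyEvaluator(new Deferred[] {
            new RowFieldDeferred(0), new ConstantDeferred("suffix") });
        System.out.println(eval.evaluate(new Object[] { "row-value" }));  // row-value
      }
    }

The benefit scales with the number of literal arguments: a call like concat(col, '-') prepares one child per row instead of two.
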
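The Utilities.setBaseWork hunk just below (and the matching MapRedTask/MapredLocalTask hunks later in the patch) all adopt the same stream-handling idiom: close() inside the try so a failed close surfaces as an error, null the reference, and let the finally block close quietly only on the exception path. A compact, self-contained rendering of the pattern with plain java.io (the class and method names here are placeholders, not Hive's):

    import java.io.ByteArrayOutputStream;
    import java.io.IOException;
    import java.io.OutputStream;

    public class CloseThenNull {
      static void writePlan(byte[] plan) throws IOException {
        OutputStream out = null;
        try {
          out = new ByteArrayOutputStream();
          out.write(plan);      // may throw: the finally block still closes the stream
          out.close();          // close errors on the happy path are reported, not swallowed
          out = null;           // signal the finally block that there is nothing left to do
        } finally {
          closeQuietly(out);    // only performs a real close after a failure
        }
      }

      // Equivalent of IOUtils.closeQuietly/closeStream: swallow close() errors
      // because an earlier exception is already propagating.
      static void closeQuietly(OutputStream out) {
        if (out != null) {
          try {
            out.close();
          } catch (IOException ignored) {
          }
        }
      }

      public static void main(String[] args) throws IOException {
        writePlan(new byte[] {1, 2, 3});
        System.out.println("plan written");
      }
    }
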
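Also below, Utilities.estimateReducers drops the hand-rolled (size + bytesPerReducer - 1) / bytesPerReducer rounding in favor of Math.ceil over an input first raised to at least one reducer's worth of bytes. The arithmetic, pulled out into a runnable sketch; note the hunk cuts off after the reducersPowerTwo computation, so the final powers-of-two clamp here is an assumption:

    public class ReducerEstimate {
      // Sketch of the patched estimate: ceil-divide, clamp to [1, maxReducers],
      // then optionally round up to a power of two, capped at maxReducers.
      public static int estimateReducers(long totalInputFileSize, long bytesPerReducer,
          int maxReducers, boolean powersOfTwo) {
        double bytes = Math.max(totalInputFileSize, bytesPerReducer);
        int reducers = (int) Math.ceil(bytes / bytesPerReducer);
        reducers = Math.max(1, reducers);
        reducers = Math.min(maxReducers, reducers);

        if (powersOfTwo) {
          int reducersLog = (int) (Math.log(reducers) / Math.log(2)) + 1;
          int reducersPowerTwo = (int) Math.pow(2, reducersLog);
          reducers = Math.min(maxReducers, reducersPowerTwo);  // assumed final clamp
        }
        return reducers;
      }

      public static void main(String[] args) {
        // 1 GB of input at 256 MB per reducer -> 4 reducers.
        System.out.println(estimateReducers(1L << 30, 256L << 20, 1009, false));
      }
    }

Raising the input to bytesPerReducer before dividing also sidesteps the old formula's risk of overflowing when totalInputFileSize is near Long.MAX_VALUE.
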
@@ -437,22 +438,11 @@ } } - public static Map> getAllScratchColumnVectorTypeMaps(Configuration hiveConf) { - BaseWork baseWork = getMapWork(hiveConf); - if (baseWork == null) { - baseWork = getReduceWork(hiveConf); - } - return baseWork.getAllScratchColumnVectorTypeMaps(); + public static Map> getMapWorkAllScratchColumnVectorTypeMaps(Configuration hiveConf) { + MapWork mapWork = getMapWork(hiveConf); + return mapWork.getAllScratchColumnVectorTypeMaps(); } - public static Map> getAllColumnVectorMaps(Configuration hiveConf) { - BaseWork baseWork = getMapWork(hiveConf); - if (baseWork == null) { - baseWork = getReduceWork(hiveConf); - } - return baseWork.getAllColumnVectorMaps(); - } - public static void setWorkflowAdjacencies(Configuration conf, QueryPlan plan) { try { Graph stageGraph = plan.getQueryPlan().getStageGraph(); @@ -672,21 +662,33 @@ Path planPath = getPlanPath(conf, name); - OutputStream out; + OutputStream out = null; if (HiveConf.getBoolVar(conf, ConfVars.HIVE_RPC_QUERY_PLAN)) { // add it to the conf ByteArrayOutputStream byteOut = new ByteArrayOutputStream(); - out = new DeflaterOutputStream(byteOut, new Deflater(Deflater.BEST_SPEED)); - serializePlan(w, out, conf); + try { + out = new DeflaterOutputStream(byteOut, new Deflater(Deflater.BEST_SPEED)); + serializePlan(w, out, conf); + out.close(); + out = null; + } finally { + IOUtils.closeStream(out); + } LOG.info("Setting plan: "+planPath.toUri().getPath()); conf.set(planPath.toUri().getPath(), Base64.encodeBase64String(byteOut.toByteArray())); } else { // use the default file system of the conf FileSystem fs = planPath.getFileSystem(conf); - out = fs.create(planPath); - serializePlan(w, out, conf); + try { + out = fs.create(planPath); + serializePlan(w, out, conf); + out.close(); + out = null; + } finally { + IOUtils.closeStream(out); + } // Serialize the plan to the default hdfs instance // Except for hadoop local mode execution where we should be @@ -3174,12 +3176,11 @@ public static int estimateReducers(long totalInputFileSize, long bytesPerReducer, int maxReducers, boolean powersOfTwo) { - - int reducers = (int) ((totalInputFileSize + bytesPerReducer - 1) / bytesPerReducer); + double bytes = Math.max(totalInputFileSize, bytesPerReducer); + int reducers = (int) Math.ceil(bytes / bytesPerReducer); reducers = Math.max(1, reducers); reducers = Math.min(maxReducers, reducers); - int reducersLog = (int)(Math.log(reducers) / Math.log(2)) + 1; int reducersPowerTwo = (int)Math.pow(2, reducersLog); @@ -3218,7 +3219,7 @@ } if (highestSamplePercentage >= 0) { - totalInputFileSize = Math.min((long) (totalInputFileSize * highestSamplePercentage / 100D) + totalInputFileSize = Math.min((long) (totalInputFileSize * (highestSamplePercentage / 100D)) , totalInputFileSize); } return totalInputFileSize; @@ -3242,7 +3243,7 @@ } if (highestSamplePercentage >= 0) { - totalInputNumFiles = Math.min((long) (totalInputNumFiles * highestSamplePercentage / 100D) + totalInputNumFiles = Math.min((long) (totalInputNumFiles * (highestSamplePercentage / 100D)) , totalInputNumFiles); } return totalInputNumFiles; Index: ql/src/java/org/apache/hadoop/hive/ql/exec/mr/MapRedTask.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/mr/MapRedTask.java (revision 1637277) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/mr/MapRedTask.java (working copy) @@ -26,6 +26,7 @@ import java.util.Map; import java.util.Properties; +import org.apache.commons.io.IOUtils; import 
org.apache.commons.lang.StringUtils; import org.apache.hadoop.fs.ContentSummary; import org.apache.hadoop.fs.FileSystem; @@ -38,8 +39,8 @@ import org.apache.hadoop.hive.ql.DriverContext; import org.apache.hadoop.hive.ql.exec.Operator; import org.apache.hadoop.hive.ql.exec.Utilities; +import org.apache.hadoop.hive.ql.plan.MapWork; import org.apache.hadoop.hive.ql.plan.MapredWork; -import org.apache.hadoop.hive.ql.plan.MapWork; import org.apache.hadoop.hive.ql.plan.OperatorDesc; import org.apache.hadoop.hive.ql.plan.ReduceWork; import org.apache.hadoop.hive.ql.session.SessionState; @@ -170,11 +171,19 @@ // write out the plan to a local file Path planPath = new Path(ctx.getLocalTmpPath(), "plan.xml"); - OutputStream out = FileSystem.getLocal(conf).create(planPath); MapredWork plan = getWork(); LOG.info("Generating plan file " + planPath.toString()); - Utilities.serializePlan(plan, out, conf); + OutputStream out = null; + try { + out = FileSystem.getLocal(conf).create(planPath); + Utilities.serializePlan(plan, out, conf); + out.close(); + out = null; + } finally { + IOUtils.closeQuietly(out); + } + String isSilent = "true".equalsIgnoreCase(System .getProperty("test.silent")) ? "-nolog" : ""; Index: ql/src/java/org/apache/hadoop/hive/ql/exec/mr/MapredLocalTask.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/mr/MapredLocalTask.java (revision 1637277) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/mr/MapredLocalTask.java (working copy) @@ -31,6 +31,7 @@ import java.util.Map; import java.util.Properties; +import org.apache.commons.io.IOUtils; import org.apache.commons.lang.StringUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -151,11 +152,20 @@ conf.setVar(ConfVars.HIVEADDEDJARS, Utilities.getResourceFiles(conf, SessionState.ResourceType.JAR)); // write out the plan to a local file Path planPath = new Path(ctx.getLocalTmpPath(), "plan.xml"); - OutputStream out = FileSystem.getLocal(conf).create(planPath); MapredLocalWork plan = getWork(); LOG.info("Generating plan file " + planPath.toString()); - Utilities.serializePlan(plan, out, conf); + OutputStream out = null; + try { + out = FileSystem.getLocal(conf).create(planPath); + Utilities.serializePlan(plan, out, conf); + out.close(); + out = null; + } finally { + IOUtils.closeQuietly(out); + } + + String isSilent = "true".equalsIgnoreCase(System.getProperty("test.silent")) ? 
"-nolog" : ""; String jarCmd; Index: ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java (revision 1637277) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java (working copy) @@ -795,11 +795,12 @@ String hdfsDirPathStr, Configuration conf) throws IOException, LoginException { List tmpResources = new ArrayList(); - addTempFiles(conf, tmpResources, hdfsDirPathStr, getTempFilesFromConf(conf)); + addTempResources(conf, tmpResources, hdfsDirPathStr, LocalResourceType.FILE, getTempFilesFromConf(conf)); + addTempResources(conf, tmpResources, hdfsDirPathStr, LocalResourceType.ARCHIVE, getTempArchivesFromConf(conf)); return tmpResources; } - public static String[] getTempFilesFromConf(Configuration conf) { + private static String[] getTempFilesFromConf(Configuration conf) { String addedFiles = Utilities.getResourceFiles(conf, SessionState.ResourceType.FILE); if (StringUtils.isNotBlank(addedFiles)) { HiveConf.setVar(conf, ConfVars.HIVEADDEDFILES, addedFiles); @@ -808,19 +809,23 @@ if (StringUtils.isNotBlank(addedJars)) { HiveConf.setVar(conf, ConfVars.HIVEADDEDJARS, addedJars); } - String addedArchives = Utilities.getResourceFiles(conf, SessionState.ResourceType.ARCHIVE); - if (StringUtils.isNotBlank(addedArchives)) { - HiveConf.setVar(conf, ConfVars.HIVEADDEDARCHIVES, addedArchives); - } - String auxJars = HiveConf.getVar(conf, HiveConf.ConfVars.HIVEAUXJARS); // need to localize the additional jars and files // we need the directory on hdfs to which we shall put all these files - String allFiles = auxJars + "," + addedJars + "," + addedFiles + "," + addedArchives; + String allFiles = auxJars + "," + addedJars + "," + addedFiles; return allFiles.split(","); } + private static String[] getTempArchivesFromConf(Configuration conf) { + String addedArchives = Utilities.getResourceFiles(conf, SessionState.ResourceType.ARCHIVE); + if (StringUtils.isNotBlank(addedArchives)) { + HiveConf.setVar(conf, ConfVars.HIVEADDEDARCHIVES, addedArchives); + return addedArchives.split(","); + } + return new String[0]; + } + /** * Localizes files, archives and jars from a provided array of names. * @param hdfsDirPathStr Destination directory in HDFS. @@ -834,12 +839,13 @@ String[] inputOutputJars) throws IOException, LoginException { if (inputOutputJars == null) return null; List tmpResources = new ArrayList(); - addTempFiles(conf, tmpResources, hdfsDirPathStr, inputOutputJars); + addTempResources(conf, tmpResources, hdfsDirPathStr, LocalResourceType.FILE, inputOutputJars); return tmpResources; } - private void addTempFiles(Configuration conf, + private void addTempResources(Configuration conf, List tmpResources, String hdfsDirPathStr, + LocalResourceType type, String[] files) throws IOException { for (String file : files) { if (!StringUtils.isNotBlank(file)) { @@ -847,7 +853,7 @@ } Path hdfsFilePath = new Path(hdfsDirPathStr, getResourceBaseName(new Path(file))); LocalResource localResource = localizeResource(new Path(file), - hdfsFilePath, conf); + hdfsFilePath, type, conf); tmpResources.add(localResource); } } @@ -925,11 +931,12 @@ /** * @param src path to the source for the resource * @param dest path in hdfs for the resource + * @param type local resource type (File/Archive) * @param conf * @return localresource from tez localization. * @throws IOException when any file system related calls fails. 
*/ - public LocalResource localizeResource(Path src, Path dest, Configuration conf) + public LocalResource localizeResource(Path src, Path dest, LocalResourceType type, Configuration conf) throws IOException { FileSystem destFS = dest.getFileSystem(conf); @@ -970,7 +977,7 @@ } } - return createLocalResource(destFS, dest, LocalResourceType.FILE, + return createLocalResource(destFS, dest, type, LocalResourceVisibility.PRIVATE); } Index: ql/src/java/org/apache/hadoop/hive/ql/exec/tez/ReduceRecordSource.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/tez/ReduceRecordSource.java (revision 1637277) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/tez/ReduceRecordSource.java (working copy) @@ -19,6 +19,7 @@ import java.util.ArrayList; import java.util.Arrays; +import java.util.Iterator; import java.util.List; import java.util.Map; @@ -73,10 +74,10 @@ // for different tags private SerDe inputValueDeserializer; - TableDesc keyTableDesc; - TableDesc valueTableDesc; + private TableDesc keyTableDesc; + private TableDesc valueTableDesc; - ObjectInspector rowObjectInspector; + private ObjectInspector rowObjectInspector; private Operator reducer; private Object keyObject = null; @@ -84,8 +85,6 @@ private boolean vectorized = false; - List row = new ArrayList(Utilities.reduceFieldNameList.size()); - private DataOutputBuffer keyBuffer; private DataOutputBuffer valueBuffer; private VectorizedRowBatchCtx batchContext; @@ -111,7 +110,7 @@ private Iterable valueWritables; - private final boolean grouped = true; + private final GroupIterator groupIterator = new GroupIterator(); void init(JobConf jconf, Operator reducer, boolean vectorized, TableDesc keyTableDesc, TableDesc valueTableDesc, KeyValuesReader reader, boolean handleGroupKey, byte tag, @@ -207,13 +206,19 @@ @Override public final boolean isGrouped() { - return grouped; + return vectorized; } @Override public boolean pushRecord() throws HiveException { BytesWritable keyWritable; + if (!vectorized && groupIterator.hasNext()) { + // if we have records left in the group we push one of those + groupIterator.next(); + return true; + } + try { if (!reader.next()) { return false; @@ -245,11 +250,13 @@ reducer.setGroupKeyObject(keyObject); } - /* this.keyObject passed via reference */ if(vectorized) { processVectors(valueWritables, tag); } else { - processKeyValues(valueWritables, tag); + groupIterator.initialize(valueWritables, keyObject, tag); + if (groupIterator.hasNext()) { + groupIterator.next(); // push first record of group + } } return true; } catch (Throwable e) { @@ -279,16 +286,29 @@ } } - /** - * @param values - * @return true if it is not done and can take more inputs - */ - private void processKeyValues(Iterable values, byte tag) throws HiveException { - List passDownKey = null; - for (Object value : values) { + private class GroupIterator { + private final List row = new ArrayList(Utilities.reduceFieldNameList.size()); + private List passDownKey = null; + private Iterator values; + private byte tag; + private Object keyObject; + + public void initialize(Iterable values, Object keyObject, byte tag) { + this.passDownKey = null; + this.values = values.iterator(); + this.tag = tag; + this.keyObject = keyObject; + } + + public boolean hasNext() { + return values != null && values.hasNext(); + } + + public void next() throws HiveException { + row.clear(); + Object value = values.next(); BytesWritable valueWritable = (BytesWritable) value; - row.clear(); if (passDownKey == null) { 
row.add(this.keyObject); } else { @@ -387,7 +407,6 @@ } catch (Exception e) { if (!abort) { // signal new failure to map-reduce - l4j.error("Hit error while closing operators - failing tree"); throw new RuntimeException("Hive Runtime Error while closing operators: " + e.getMessage(), e); } Index: ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezJobMonitor.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezJobMonitor.java (revision 1637277) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezJobMonitor.java (working copy) @@ -19,30 +19,49 @@ package org.apache.hadoop.hive.ql.exec.tez; import static org.apache.tez.dag.api.client.DAGStatus.State.RUNNING; +import static org.fusesource.jansi.Ansi.ansi; +import static org.fusesource.jansi.internal.CLibrary.STDOUT_FILENO; +import static org.fusesource.jansi.internal.CLibrary.isatty; -import java.io.IOException; -import java.util.Collections; -import java.util.HashSet; -import java.util.LinkedList; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.SortedSet; -import java.util.TreeSet; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.ql.exec.FileSinkOperator; import org.apache.hadoop.hive.ql.exec.Heartbeater; +import org.apache.hadoop.hive.ql.exec.MapOperator; +import org.apache.hadoop.hive.ql.exec.ReduceSinkOperator; import org.apache.hadoop.hive.ql.lockmgr.HiveTxnManager; import org.apache.hadoop.hive.ql.log.PerfLogger; +import org.apache.hadoop.hive.ql.session.SessionState; import org.apache.hadoop.hive.ql.session.SessionState.LogHelper; +import org.apache.tez.common.counters.TaskCounter; +import org.apache.tez.common.counters.TezCounter; +import org.apache.tez.common.counters.TezCounters; +import org.apache.tez.dag.api.DAG; import org.apache.tez.dag.api.TezException; +import org.apache.tez.dag.api.Vertex; import org.apache.tez.dag.api.client.DAGClient; import org.apache.tez.dag.api.client.DAGStatus; import org.apache.tez.dag.api.client.Progress; import org.apache.tez.dag.api.client.StatusGetOpts; +import org.apache.tez.dag.api.client.VertexStatus; +import org.fusesource.jansi.Ansi; +import java.io.IOException; +import java.io.PrintStream; +import java.text.DecimalFormat; +import java.text.NumberFormat; +import java.util.Collections; +import java.util.EnumSet; +import java.util.HashSet; +import java.util.LinkedList; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Set; +import java.util.SortedSet; +import java.util.TreeSet; + +import jline.Terminal; + /** * TezJobMonitor keeps track of a tez job while it's being executed. It will * print status to the console and retrieve final status of the job after @@ -50,16 +69,47 @@ */ public class TezJobMonitor { - private static final Log LOG = LogFactory.getLog(TezJobMonitor.class.getName()); private static final String CLASS_NAME = TezJobMonitor.class.getName(); + private static final int MIN_TERMINAL_WIDTH = 80; + private static final int COLUMN_1_WIDTH = 16; + private static final int SEPARATOR_WIDTH = 80; + // keep this within 80 chars width. 
If more columns need to be added then update the min terminal + // width requirement and separator width accordingly + private static final String HEADER_FORMAT = "%16s%12s %5s %9s %7s %7s %6s %6s"; + private static final String VERTEX_FORMAT = "%-16s%12s %5s %9s %7s %7s %6s %6s"; + private static final String FOOTER_FORMAT = "%-15s %-30s %-4s %-25s"; + private static final String HEADER = String.format(HEADER_FORMAT, + "VERTICES", "STATUS", "TOTAL", "COMPLETED", "RUNNING", "PENDING", "FAILED", "KILLED"); + + // method and dag summary format + private static final String SUMMARY_HEADER_FORMAT = "%-16s %-12s %-12s %-12s %-19s %-19s %-15s %-15s %-15s"; + private static final String SUMMARY_VERTEX_FORMAT = "%-16s %11s %16s %12s %16s %18s %18s %14s %16s"; + private static final String SUMMARY_HEADER = String.format(SUMMARY_HEADER_FORMAT, + "VERTICES", "TOTAL_TASKS", "FAILED_ATTEMPTS", "KILLED_TASKS", "DURATION_SECONDS", + "CPU_TIME_MILLIS", "GC_TIME_MILLIS", "INPUT_RECORDS", "OUTPUT_RECORDS"); + + private static final String TOTAL_PREP_TIME = "TotalPrepTime"; + private static final String METHOD = "METHOD"; + private static final String DURATION = "DURATION(ms)"; + + // in-place progress update related variables + private int lines; + private PrintStream out; + private String separator; + private transient LogHelper console; private final PerfLogger perfLogger = PerfLogger.getPerfLogger(); private final int checkInterval = 200; private final int maxRetryInterval = 2500; private final int printInterval = 3000; + private final int progressBarChars = 30; private long lastPrintTime; private Set completed; + + /* Pretty print the values */ + private final NumberFormat secondsFormat; + private final NumberFormat commaFormat; private static final List shutdownList; static { @@ -83,20 +133,111 @@ } public TezJobMonitor() { - console = new LogHelper(LOG); + console = SessionState.getConsole(); + secondsFormat = new DecimalFormat("#0.00"); + commaFormat = NumberFormat.getNumberInstance(Locale.US); + // all progress updates are written to the info stream and the log file. In-place updates can only be + // done to the info stream (console) + out = console.getInfoStream(); + separator = ""; + for (int i = 0; i < SEPARATOR_WIDTH; i++) { + separator += "-"; + } } + private static boolean isUnixTerminal() { + + String os = System.getProperty("os.name"); + if (os.startsWith("Windows")) { + // we do not support Windows; we will revisit this if we really need it for Windows. + return false; + } + + // We must be on some Unix variant. + // check if standard out is a terminal + try { + // the isatty system call will return 1 if the file descriptor is a terminal, else 0 + if (isatty(STDOUT_FILENO) == 0) { + return false; + } + } catch (NoClassDefFoundError ignore) { + // These errors happen if the JNI lib is not available for your platform. + return false; + } catch (UnsatisfiedLinkError ignore) { + // These errors happen if the JNI lib is not available for your platform. + return false; + } + return true; + } + /** - * monitorExecution handles status printing, failures during execution and final - * status retrieval. + * NOTE: Use this method only if isUnixTerminal is true. + * Erases the current line and prints the given line. + * @param line - line to print + */ + public void reprintLine(String line) { + out.print(ansi().eraseLine(Ansi.Erase.ALL).a(line).a('\n').toString()); + out.flush(); + lines++; + } + + /** + * NOTE: Use this method only if isUnixTerminal is true.
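
The reprintLine helper above, and the repositionCursor/reprintMultiLine helpers that follow, are thin wrappers over two ANSI escape sequences: erase-line (ESC[2K) and cursor-up (ESC[nA). A jansi-free sketch of the same redraw loop, assuming any VT100-compatible terminal; the bar shape imitates the [====>>----] style this class prints:

    public class InPlaceProgress {
      private static int lines = 0;

      // Erase the current line, print the new content, and count lines written,
      // mirroring what reprintLine does through jansi's ansi() builder.
      static void reprintLine(String line) {
        System.out.print("\u001B[2K" + line + "\n");
        System.out.flush();
        lines++;
      }

      // Move the cursor back up over everything written since the last reposition.
      static void repositionCursor() {
        if (lines > 0) {
          System.out.print("\u001B[" + lines + "A");
          System.out.flush();
          lines = 0;
        }
      }

      public static void main(String[] args) throws InterruptedException {
        for (int pct = 0; pct <= 100; pct += 10) {
          repositionCursor();
          int filled = pct / 10;
          StringBuilder bar = new StringBuilder("[");
          for (int i = 0; i < 10; i++) {
            bar.append(i < filled ? "=" : "-");
          }
          bar.append("] ").append(pct).append("%");
          reprintLine(bar.toString());
          Thread.sleep(200);
        }
      }
    }

This is also why the monitor falls back to plain log-style reports when stdout is not a tty: the escape codes would otherwise end up as garbage in a redirected log file.
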
+ * Erases the current line and prints the given line with the specified color. + * @param line - line to print + * @param color - color for the line + */ + public void reprintLineWithColorAsBold(String line, Ansi.Color color) { + out.print(ansi().eraseLine(Ansi.Erase.ALL).fg(color).bold().a(line).a('\n').boldOff().reset() + .toString()); + out.flush(); + lines++; + } + + /** + * NOTE: Use this method only if isUnixTerminal is true. + * Erases the current line and prints the given multiline. Make sure the specified line is not + * terminated by linebreak. + * @param line - line to print + */ + public void reprintMultiLine(String line) { + int numLines = line.split("\r\n|\r|\n").length; + out.print(ansi().eraseLine(Ansi.Erase.ALL).a(line).a('\n').toString()); + out.flush(); + lines += numLines; + } + + /** + * NOTE: Use this method only if isUnixTerminal is true. + * Repositions the cursor back to line 0. + */ + public void repositionCursor() { + if (lines > 0) { + out.print(ansi().cursorUp(lines).toString()); + out.flush(); + lines = 0; + } + } + + /** + * NOTE: Use this method only if isUnixTerminal is true. + * Gets the width of the terminal + * @return - width of terminal + */ + public int getTerminalWidth() { + return Terminal.getTerminal().getTerminalWidth(); + } + + /** + * monitorExecution handles status printing, failures during execution and final status retrieval. * * @param dagClient client that was used to kick off the job * @param txnMgr transaction manager for this operation * @param conf configuration file for this operation * @return int 0 - success, 1 - killed, 2 - failed */ - public int monitorExecution(final DAGClient dagClient, HiveTxnManager txnMgr, - HiveConf conf) throws InterruptedException { + public int monitorExecution(final DAGClient dagClient, HiveTxnManager txnMgr, HiveConf conf, + DAG dag) throws InterruptedException { DAGStatus status = null; completed = new HashSet(); @@ -109,14 +250,30 @@ Set opts = new HashSet(); Heartbeater heartbeater = new Heartbeater(txnMgr, conf); long startTime = 0; + boolean isProfileEnabled = conf.getBoolVar(conf, HiveConf.ConfVars.TEZ_EXEC_SUMMARY); + boolean inPlaceUpdates = conf.getBoolVar(conf, HiveConf.ConfVars.TEZ_EXEC_INPLACE_PROGRESS); + boolean wideTerminal = false; + boolean isTerminal = inPlaceUpdates == true ? 
isUnixTerminal() : false; + // we need at least 80 chars wide terminal to display in-place updates properly + if (isTerminal) { + if (getTerminalWidth() >= MIN_TERMINAL_WIDTH) { + wideTerminal = true; + } + } + + boolean inPlaceEligible = false; + if (inPlaceUpdates && isTerminal && wideTerminal && !console.getIsSilent()) { + inPlaceEligible = true; + } + shutdownList.add(dagClient); console.printInfo("\n"); perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.TEZ_RUN_DAG); perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.TEZ_SUBMIT_TO_RUNNING); - while(true) { + while (true) { try { status = dagClient.getDAGStatus(opts); @@ -127,7 +284,7 @@ if (state != lastState || state == RUNNING) { lastState = state; - switch(state) { + switch (state) { case SUBMITTED: console.printInfo("Status: Submitted"); break; @@ -138,23 +295,49 @@ if (!running) { perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.TEZ_SUBMIT_TO_RUNNING); console.printInfo("Status: Running (" + dagClient.getExecutionContext() + ")\n"); - for (String s: progressMap.keySet()) { - perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.TEZ_RUN_VERTEX + s); - } startTime = System.currentTimeMillis(); running = true; } - lastReport = printStatus(progressMap, lastReport, console); + if (inPlaceEligible) { + printStatusInPlace(progressMap, startTime, false, dagClient); + // log the progress report to log file as well + console.logInfo(getReport(progressMap)); + } else { + lastReport = printStatus(progressMap, lastReport, console); + } break; case SUCCEEDED: - lastReport = printStatus(progressMap, lastReport, console); - double duration = (System.currentTimeMillis() - startTime)/1000.0; - console.printInfo("Status: Finished successfully in " + String.format("%.2f seconds", duration)); + if (inPlaceEligible) { + printStatusInPlace(progressMap, startTime, false, dagClient); + // log the progress report to log file as well + console.logInfo(getReport(progressMap)); + } else { + lastReport = printStatus(progressMap, lastReport, console); + } + + /* Profile info is collected anyways, isProfileEnabled + * decides if it gets printed or not + */ + if (isProfileEnabled) { + + double duration = (System.currentTimeMillis() - startTime) / 1000.0; + console.printInfo("Status: DAG finished successfully in " + + String.format("%.2f seconds", duration)); + console.printInfo("\n"); + + printMethodsSummary(); + printDagSummary(progressMap, console, dagClient, conf, dag); + } running = false; done = true; break; case KILLED: + if (inPlaceEligible) { + printStatusInPlace(progressMap, startTime, true, dagClient); + // log the progress report to log file as well + console.logInfo(getReport(progressMap)); + } console.printInfo("Status: Killed"); running = false; done = true; @@ -162,6 +345,11 @@ break; case FAILED: case ERROR: + if (inPlaceEligible) { + printStatusInPlace(progressMap, startTime, true, dagClient); + // log the progress report to log file as well + console.logInfo(getReport(progressMap)); + } console.printError("Status: Failed"); running = false; done = true; @@ -173,15 +361,15 @@ Thread.sleep(checkInterval); } } catch (Exception e) { - console.printInfo("Exception: "+e.getMessage()); - if (++failedCounter % maxRetryInterval/checkInterval == 0 + console.printInfo("Exception: " + e.getMessage()); + if (++failedCounter % maxRetryInterval / checkInterval == 0 || e instanceof InterruptedException) { try { console.printInfo("Killing DAG..."); dagClient.tryKillDAG(); - } catch(IOException io) { + } catch (IOException io) { // best effort - } catch(TezException te) { + } 
catch (TezException te) { // best effort } e.printStackTrace(); @@ -194,7 +382,7 @@ } finally { if (done) { if (rc != 0 && status != null) { - for (String diag: status.getDiagnostics()) { + for (String diag : status.getDiagnostics()) { console.printError(diag); } } @@ -222,7 +410,338 @@ } } + private static long getCounterValueByGroupName(TezCounters vertexCounters, + String groupNamePattern, + String counterName) { + TezCounter tezCounter = vertexCounters.getGroup(groupNamePattern).findCounter(counterName); + return (tezCounter == null) ? 0 : tezCounter.getValue(); + } + + private void printMethodsSummary() { + long totalInPrepTime = 0; + + String[] perfLoggerReportMethods = { + (PerfLogger.PARSE), + (PerfLogger.ANALYZE), + (PerfLogger.TEZ_BUILD_DAG), + (PerfLogger.TEZ_SUBMIT_TO_RUNNING) + }; + + /* Build the method summary header */ + String methodBreakdownHeader = String.format("%-30s %-13s", METHOD, DURATION); + console.printInfo(methodBreakdownHeader); + + for (String method : perfLoggerReportMethods) { + long duration = perfLogger.getDuration(method); + totalInPrepTime += duration; + console.printInfo(String.format("%-30s %11s", method, commaFormat.format(duration))); + } + + /* + * The counters list above don't capture the total time from TimeToSubmit.startTime till + * TezRunDag.startTime, so calculate the duration and print it. + */ + totalInPrepTime = perfLogger.getStartTime(PerfLogger.TEZ_RUN_DAG) - + perfLogger.getStartTime(PerfLogger.TIME_TO_SUBMIT); + + console.printInfo(String.format("%-30s %11s\n", TOTAL_PREP_TIME, commaFormat.format( + totalInPrepTime))); + } + + private void printDagSummary(Map progressMap, LogHelper console, + DAGClient dagClient, HiveConf conf, DAG dag) { + + /* Strings for headers and counters */ + String hiveCountersGroup = conf.getVar(conf, HiveConf.ConfVars.HIVECOUNTERGROUP); + Set statusGetOpts = EnumSet.of(StatusGetOpts.GET_COUNTERS); + TezCounters hiveCounters = null; + try { + hiveCounters = dagClient.getDAGStatus(statusGetOpts).getDAGCounters(); + } catch (IOException e) { + // best attempt, shouldn't really kill DAG for this + } catch (TezException e) { + // best attempt, shouldn't really kill DAG for this + } + + /* If the counters are missing there is no point trying to print progress */ + if (hiveCounters == null) { + return; + } + + /* Print the per Vertex summary */ + console.printInfo(SUMMARY_HEADER); + SortedSet keys = new TreeSet(progressMap.keySet()); + Set statusOptions = new HashSet(1); + statusOptions.add(StatusGetOpts.GET_COUNTERS); + for (String vertexName : keys) { + Progress progress = progressMap.get(vertexName); + if (progress != null) { + final int totalTasks = progress.getTotalTaskCount(); + final int failedTaskAttempts = progress.getFailedTaskAttemptCount(); + final int killedTasks = progress.getKilledTaskCount(); + final double duration = + perfLogger.getDuration(PerfLogger.TEZ_RUN_VERTEX + vertexName) / 1000.0; + VertexStatus vertexStatus = null; + try { + vertexStatus = dagClient.getVertexStatus(vertexName, statusOptions); + } catch (IOException e) { + // best attempt, shouldn't really kill DAG for this + } catch (TezException e) { + // best attempt, shouldn't really kill DAG for this + } + + if (vertexStatus == null) { + continue; + } + + Vertex currentVertex = dag.getVertex(vertexName); + List inputVerticesList = currentVertex.getInputVertices(); + long hiveInputRecordsFromOtherVertices = 0; + if (inputVerticesList.size() > 0) { + + for (Vertex inputVertex : inputVerticesList) { + String inputVertexName = 
inputVertex.getName(); + hiveInputRecordsFromOtherVertices += getCounterValueByGroupName(hiveCounters, + hiveCountersGroup, String.format("%s_", + ReduceSinkOperator.Counter.RECORDS_OUT_INTERMEDIATE.toString()) + + inputVertexName.replace(" ", "_")); + + hiveInputRecordsFromOtherVertices += getCounterValueByGroupName(hiveCounters, + hiveCountersGroup, String.format("%s_", + FileSinkOperator.Counter.RECORDS_OUT.toString()) + + inputVertexName.replace(" ", "_")); + } + } + + /* + * Get the CPU & GC + * + * counters org.apache.tez.common.counters.TaskCounter + * GC_TIME_MILLIS=37712 + * CPU_MILLISECONDS=2774230 + */ + final TezCounters vertexCounters = vertexStatus.getVertexCounters(); + final double cpuTimeMillis = getCounterValueByGroupName(vertexCounters, + TaskCounter.class.getName(), + TaskCounter.CPU_MILLISECONDS.name()); + + final double gcTimeMillis = getCounterValueByGroupName(vertexCounters, + TaskCounter.class.getName(), + TaskCounter.GC_TIME_MILLIS.name()); + + /* + * Get the HIVE counters + * + * HIVE + * CREATED_FILES=1 + * DESERIALIZE_ERRORS=0 + * RECORDS_IN_Map_1=550076554 + * RECORDS_OUT_INTERMEDIATE_Map_1=854987 + * RECORDS_OUT_Reducer_2=1 + */ + + final long hiveInputRecords = + getCounterValueByGroupName( + hiveCounters, + hiveCountersGroup, + String.format("%s_", MapOperator.Counter.RECORDS_IN.toString()) + + vertexName.replace(" ", "_")) + + hiveInputRecordsFromOtherVertices; + final long hiveOutputIntermediateRecords = + getCounterValueByGroupName( + hiveCounters, + hiveCountersGroup, + String.format("%s_", ReduceSinkOperator.Counter.RECORDS_OUT_INTERMEDIATE.toString()) + + vertexName.replace(" ", "_")); + final long hiveOutputRecords = + getCounterValueByGroupName( + hiveCounters, + hiveCountersGroup, + String.format("%s_", FileSinkOperator.Counter.RECORDS_OUT.toString()) + + vertexName.replace(" ", "_")) + + hiveOutputIntermediateRecords; + + String vertexExecutionStats = String.format(SUMMARY_VERTEX_FORMAT, + vertexName, + totalTasks, + failedTaskAttempts, + killedTasks, + secondsFormat.format((duration)), + commaFormat.format(cpuTimeMillis), + commaFormat.format(gcTimeMillis), + commaFormat.format(hiveInputRecords), + commaFormat.format(hiveOutputRecords)); + console.printInfo(vertexExecutionStats); + } + } + } + + private void printStatusInPlace(Map progressMap, long startTime, + boolean vertexStatusFromAM, DAGClient dagClient) { + StringBuffer reportBuffer = new StringBuffer(); + int sumComplete = 0; + int sumTotal = 0; + + // position the cursor to line 0 + repositionCursor(); + + // print header + // ------------------------------------------------------------------------------- + // VERTICES STATUS TOTAL COMPLETED RUNNING PENDING FAILED KILLED + // ------------------------------------------------------------------------------- + reprintLine(separator); + reprintLineWithColorAsBold(HEADER, Ansi.Color.CYAN); + reprintLine(separator); + + SortedSet keys = new TreeSet(progressMap.keySet()); + int idx = 0; + int maxKeys = keys.size(); + for (String s : keys) { + idx++; + Progress progress = progressMap.get(s); + final int complete = progress.getSucceededTaskCount(); + final int total = progress.getTotalTaskCount(); + final int running = progress.getRunningTaskCount(); + final int failed = progress.getFailedTaskAttemptCount(); + final int pending = progress.getTotalTaskCount() - progress.getSucceededTaskCount() - + progress.getRunningTaskCount(); + final int killed = progress.getKilledTaskCount(); + + // To get vertex status we can use DAGClient.getVertexStatus(),
but it will be expensive to + // get status from AM for every refresh of the UI. Let's infer the state from task counts. + // The vertex status is fetched from the AM only if the DAG is FAILED or KILLED. + VertexStatus.State vertexState = VertexStatus.State.INITIALIZING; + + // INITED state + if (total > 0) { + vertexState = VertexStatus.State.INITED; + sumComplete += complete; + sumTotal += total; + } + + // RUNNING state + if (complete < total && (complete > 0 || running > 0 || failed > 0)) { + vertexState = VertexStatus.State.RUNNING; + if (!perfLogger.startTimeHasMethod(PerfLogger.TEZ_RUN_VERTEX + s)) { + perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.TEZ_RUN_VERTEX + s); + } + } + + // SUCCEEDED state + if (complete == total) { + vertexState = VertexStatus.State.SUCCEEDED; + if (!completed.contains(s)) { + completed.add(s); + + /* We may have missed the start of the vertex + * due to the 3 seconds interval + */ + if (!perfLogger.startTimeHasMethod(PerfLogger.TEZ_RUN_VERTEX + s)) { + perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.TEZ_RUN_VERTEX + s); + } + + perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.TEZ_RUN_VERTEX + s); + } + } + + // The DAG might have been killed; let's try to get the vertex state from the AM before dying + // KILLED or FAILED state + if (vertexStatusFromAM) { + VertexStatus vertexStatus = null; + try { + vertexStatus = dagClient.getVertexStatus(s, null); + } catch (IOException e) { + // best attempt, shouldn't really kill DAG for this + } catch (TezException e) { + // best attempt, shouldn't really kill DAG for this + } + if (vertexStatus != null) { + vertexState = vertexStatus.getState(); + } + } + + // Map 1 .......... SUCCEEDED 7 7 0 0 0 0 + String nameWithProgress = getNameWithProgress(s, complete, total); + String vertexStr = String.format(VERTEX_FORMAT, + nameWithProgress, + vertexState.toString(), + total, + complete, + running, + pending, + failed, + killed); + reportBuffer.append(vertexStr); + if (idx != maxKeys) { + reportBuffer.append("\n"); + } + } + + reprintMultiLine(reportBuffer.toString()); + + // ------------------------------------------------------------------------------- + // VERTICES: 03/04 [=================>>-----] 86% ELAPSED TIME: 1.71 s + // ------------------------------------------------------------------------------- + reprintLine(separator); + final float progress = (sumTotal == 0) ? 0.0f : (float) sumComplete / (float) sumTotal; + String footer = getFooter(keys.size(), completed.size(), progress, startTime); + reprintLineWithColorAsBold(footer, Ansi.Color.RED); + reprintLine(separator); + } + + // Map 1 .......... + private String getNameWithProgress(String s, int complete, int total) { + float percent = total == 0 ?
0.0f : (float) complete / (float) total; + // lets use the remaining space in column 1 as progress bar + int spaceRemaining = COLUMN_1_WIDTH - s.length() - 1; + String result = s + " "; + int toFill = (int) (spaceRemaining * percent); + for (int i = 0; i < toFill; i++) { + result += "."; + } + return result; + } + + // VERTICES: 03/04 [==================>>-----] 86% ELAPSED TIME: 1.71 s + private String getFooter(int keySize, int completedSize, float progress, long startTime) { + String verticesSummary = String.format("VERTICES: %02d/%02d", completedSize, keySize); + String progressBar = getInPlaceProgressBar(progress); + final int progressPercent = (int) (progress * 100); + String progressStr = "" + progressPercent + "%"; + float et = (float) (System.currentTimeMillis() - startTime) / (float) 1000; + String elapsedTime = "ELAPSED TIME: " + secondsFormat.format(et) + " s"; + String footer = String.format(FOOTER_FORMAT, + verticesSummary, progressBar, progressStr, elapsedTime); + return footer; + } + + // [==================>>-----] + private String getInPlaceProgressBar(float percent) { + StringBuilder bar = new StringBuilder("["); + int remainingChars = progressBarChars - 4; + int completed = (int) (remainingChars * percent); + int pending = remainingChars - completed; + for (int i = 0; i < completed; i++) { + bar.append("="); + } + bar.append(">>"); + for (int i = 0; i < pending; i++) { + bar.append("-"); + } + bar.append("]"); + return bar.toString(); + } + private String printStatus(Map progressMap, String lastReport, LogHelper console) { + String report = getReport(progressMap); + if (!report.equals(lastReport) || System.currentTimeMillis() >= lastPrintTime + printInterval) { + console.printInfo(report); + lastPrintTime = System.currentTimeMillis(); + } + return report; + } + + private String getReport(Map progressMap) { StringBuffer reportBuffer = new StringBuffer(); SortedSet keys = new TreeSet(progressMap.keySet()); @@ -231,15 +750,28 @@ final int complete = progress.getSucceededTaskCount(); final int total = progress.getTotalTaskCount(); final int running = progress.getRunningTaskCount(); - final int failed = progress.getFailedTaskCount(); + final int failed = progress.getFailedTaskAttemptCount(); if (total <= 0) { reportBuffer.append(String.format("%s: -/-\t", s, complete, total)); } else { if (complete == total && !completed.contains(s)) { completed.add(s); + + /* + * We may have missed the start of the vertex due to the 3 seconds interval + */ + if (!perfLogger.startTimeHasMethod(PerfLogger.TEZ_RUN_VERTEX + s)) { + perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.TEZ_RUN_VERTEX + s); + } + perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.TEZ_RUN_VERTEX + s); } if(complete < total && (complete > 0 || running > 0 || failed > 0)) { + + if (!perfLogger.startTimeHasMethod(PerfLogger.TEZ_RUN_VERTEX + s)) { + perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.TEZ_RUN_VERTEX + s); + } + /* vertex is started, but not complete */ if (failed > 0) { reportBuffer.append(String.format("%s: %d(+%d,-%d)/%d\t", s, complete, running, failed, total)); @@ -258,12 +790,6 @@ } } - String report = reportBuffer.toString(); - if (!report.equals(lastReport) || System.currentTimeMillis() >= lastPrintTime + printInterval) { - console.printInfo(report); - lastPrintTime = System.currentTimeMillis(); - } - - return report; + return reportBuffer.toString(); } } Index: ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionState.java =================================================================== --- 
ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionState.java (revision 1637277) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionState.java (working copy) @@ -17,6 +17,7 @@ */ package org.apache.hadoop.hive.ql.exec.tez; + import java.io.FileNotFoundException; import java.io.IOException; import java.io.InputStream; @@ -47,6 +48,7 @@ import org.apache.hadoop.hive.shims.ShimLoader; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.yarn.api.records.LocalResource; +import org.apache.hadoop.yarn.api.records.LocalResourceType; import org.apache.tez.client.TezClient; import org.apache.tez.dag.api.PreWarmVertex; import org.apache.tez.dag.api.SessionNotRunning; @@ -352,7 +354,7 @@ // TODO: if this method is ever called on more than one jar, getting the dir and the // list need to be refactored out to be done only once. Path destFile = new Path(destDirPath.toString() + "/" + destFileName); - return utils.localizeResource(localFile, destFile, conf); + return utils.localizeResource(localFile, destFile, LocalResourceType.FILE, conf); } Index: ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezTask.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezTask.java (revision 1637277) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezTask.java (working copy) @@ -50,6 +50,7 @@ import org.apache.hadoop.security.token.Token; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.yarn.api.records.LocalResource; +import org.apache.hadoop.yarn.api.records.LocalResourceType; import org.apache.tez.common.counters.CounterGroup; import org.apache.tez.common.counters.TezCounter; import org.apache.tez.common.counters.TezCounters; @@ -162,7 +163,7 @@ // finally monitor will print progress until the job is done TezJobMonitor monitor = new TezJobMonitor(); - rc = monitor.monitorExecution(client, ctx.getHiveTxnManager(), conf); + rc = monitor.monitorExecution(client, ctx.getHiveTxnManager(), conf, dag); // fetch the counters Set statusGetOpts = EnumSet.of(StatusGetOpts.GET_COUNTERS); @@ -360,7 +361,10 @@ Map resourceMap = new HashMap(); if (additionalLr != null) { for (LocalResource lr: additionalLr) { - resourceMap.put(utils.getBaseName(lr), lr); + if (lr.getType() == LocalResourceType.FILE) { + // TEZ AM will only localize FILE (no script operators in the AM) + resourceMap.put(utils.getBaseName(lr), lr); + } } } Index: ql/src/java/org/apache/hadoop/hive/ql/exec/vector/DecimalColumnVector.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/vector/DecimalColumnVector.java (revision 1637277) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/DecimalColumnVector.java (working copy) @@ -17,26 +17,25 @@ */ package org.apache.hadoop.hive.ql.exec.vector; -import org.apache.hadoop.hive.common.type.Decimal128; + +import java.math.BigInteger; + +import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable; import org.apache.hadoop.hive.common.type.HiveDecimal; import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch; -import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable; import org.apache.hadoop.io.NullWritable; import org.apache.hadoop.io.Writable; public class DecimalColumnVector extends ColumnVector { /** - * A vector if Decimal128 objects. These are mutable and have fairly - * efficient operations on them. 
This will make it faster to load - * column vectors and perform decimal vector operations with decimal- - * specific VectorExpressions. + * A vector of HiveDecimalWritable objects. * * For high performance and easy access to this low-level structure, * the fields are public by design (as they are in other ColumnVector * types). */ - public Decimal128[] vector; + public HiveDecimalWritable[] vector; public short scale; public short precision; @@ -51,9 +50,9 @@ this.precision = (short) precision; this.scale = (short) scale; final int len = size; - vector = new Decimal128[len]; + vector = new HiveDecimalWritable[len]; for (int i = 0; i < len; i++) { - vector[i] = new Decimal128(0, this.scale); + vector[i] = new HiveDecimalWritable(HiveDecimal.ZERO); } } @@ -65,8 +64,7 @@ if (!noNulls && isNull[index]) { return NullWritable.get(); } else { - Decimal128 dec = vector[index]; - writableObj.set(HiveDecimal.create(dec.toBigDecimal())); + writableObj.set(vector[index]); return writableObj; } } @@ -78,22 +76,38 @@ @Override public void setElement(int outElementNum, int inputElementNum, ColumnVector inputVector) { - vector[outElementNum].update(((DecimalColumnVector) inputVector).vector[inputElementNum]); - vector[outElementNum].changeScaleDestructive(scale); + HiveDecimal hiveDec = ((DecimalColumnVector) inputVector).vector[inputElementNum].getHiveDecimal(precision, scale); + if (hiveDec == null) { + noNulls = false; + isNull[outElementNum] = true; + } else { + vector[outElementNum].set(hiveDec); + } } - /** - * Check if the value at position i fits in the available precision, - * and convert the value to NULL if it does not. - */ - public void checkPrecisionOverflow(int i) { - try { - vector[i].checkPrecisionOverflow(precision); - } catch (ArithmeticException e) { + public void set(int elementNum, HiveDecimalWritable writeable) { + HiveDecimal hiveDec = writeable.getHiveDecimal(precision, scale); + if (hiveDec == null) { + noNulls = false; + isNull[elementNum] = true; + } else { + vector[elementNum].set(hiveDec); + } + } - // If the value won't fit in the available precision, the result is NULL + public void set(int elementNum, HiveDecimal hiveDec) { + HiveDecimal checkedDec = HiveDecimal.enforcePrecisionScale(hiveDec, precision, scale); + if (checkedDec == null) { noNulls = false; - isNull[i] = true; + isNull[elementNum] = true; + } else { + vector[elementNum].set(checkedDec); } } + + public void setNullDataValue(int elementNum) { + // E.g. 
For scale 2 the minimum is "0.01" + HiveDecimal minimumNonZeroValue = HiveDecimal.create(BigInteger.ONE, scale); + vector[elementNum].set(minimumNonZeroValue); + } } Index: ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorColumnAssignFactory.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorColumnAssignFactory.java (revision 1637277) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorColumnAssignFactory.java (working copy) @@ -23,7 +23,6 @@ import java.util.List; import java.util.Map; -import org.apache.hadoop.hive.common.type.Decimal128; import org.apache.hadoop.hive.common.type.HiveChar; import org.apache.hadoop.hive.common.type.HiveDecimal; import org.apache.hadoop.hive.common.type.HiveVarchar; @@ -152,18 +151,12 @@ private static abstract class VectorDecimalColumnAssign extends VectorColumnAssignVectorBase { + protected void assignDecimal(HiveDecimal value, int index) { - outCol.vector[index].update(value.unscaledValue(), (byte) value.scale()); + outCol.set(index, value); } - - protected void assignDecimal(Decimal128 value, int index) { - outCol.vector[index].update(value); - } protected void assignDecimal(HiveDecimalWritable hdw, int index) { - byte[] internalStorage = hdw.getInternalStorage(); - int scale = hdw.getScale(); - - outCol.vector[index].fastUpdateFromInternalStorage(internalStorage, (short)scale); + outCol.set(index, hdw); } } Index: ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorFilterOperator.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorFilterOperator.java (revision 1637277) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorFilterOperator.java (working copy) @@ -38,6 +38,9 @@ private VectorExpression conditionEvaluator = null; + // Temporary selected vector + private int[] temporarySelected = new int [VectorizedRowBatch.DEFAULT_SIZE]; + // filterMode is 1 if condition is always true, -1 if always false // and 0 if condition needs to be computed. transient private int filterMode = 0; @@ -82,8 +85,16 @@ public void processOp(Object row, int tag) throws HiveException { VectorizedRowBatch vrg = (VectorizedRowBatch) row; + + //The selected vector represents selected rows. + //Clone the selected vector + System.arraycopy(vrg.selected, 0, temporarySelected, 0, vrg.size); + int [] selectedBackup = vrg.selected; + vrg.selected = temporarySelected; + int sizeBackup = vrg.size; + boolean selectedInUseBackup = vrg.selectedInUse; + //Evaluate the predicate expression - //The selected vector represents selected rows. switch (filterMode) { case 0: conditionEvaluator.evaluate(vrg); @@ -99,6 +110,11 @@ if (vrg.size > 0) { forward(vrg, null); } + + // Restore the original selected vector + vrg.selected = selectedBackup; + vrg.size = sizeBackup; + vrg.selectedInUse = selectedInUseBackup; } static public String getOperatorName() { Index: ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorGroupByOperator.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorGroupByOperator.java (revision 1637277) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorGroupByOperator.java (working copy) @@ -76,8 +76,6 @@ // Create a new outgoing vectorization context because column name map will change. 
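
A note on the DecimalColumnVector hunks above: the deleted checkPrecisionOverflow converted an ArithmeticException into a NULL after the fact, while the new set methods enforce precision and scale up front and mark the slot NULL when the value cannot be represented. A rough model of that null-on-overflow convention in plain java.math, loosely patterned on the HiveDecimal.enforcePrecisionScale call visible above (Hive's exact rounding rules may differ):

    import java.math.BigDecimal;
    import java.math.RoundingMode;

    public class DecimalBounds {
      // Returns the value rounded to maxScale, or null if the rounded value needs
      // more than maxPrecision digits -- callers then mark the slot as NULL.
      static BigDecimal enforcePrecisionScale(BigDecimal dec, int maxPrecision, int maxScale) {
        if (dec == null) {
          return null;
        }
        BigDecimal rounded = dec.setScale(maxScale, RoundingMode.HALF_UP);
        if (rounded.precision() > maxPrecision) {
          return null;  // over-limit values become NULL instead of throwing
        }
        return rounded;
      }

      public static void main(String[] args) {
        // decimal(5,2): 123.456 rounds to 123.46 and fits; 12345.6 does not.
        System.out.println(enforcePrecisionScale(new BigDecimal("123.456"), 5, 2));  // 123.46
        System.out.println(enforcePrecisionScale(new BigDecimal("12345.6"), 5, 2));  // null
      }
    }

Pushing the check into set() means every write path (assignment, copy between vectors, group-key copy) shares one NULL-on-overflow behavior instead of each caller remembering to call a separate overflow check.
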
private VectorizationContext vOutContext = null; - private String fileKey; - // The above members are initialized by the constructor and must not be // transient. //--------------------------------------------------------------------------- @@ -756,7 +754,6 @@ vOutContext = new VectorizationContext(desc.getOutputColumnNames()); vOutContext.setFileKey(vContext.getFileKey() + "/_GROUPBY_"); - fileKey = vOutContext.getFileKey(); } public VectorGroupByOperator() { @@ -796,7 +793,7 @@ outputFieldNames, objectInspectors); if (isVectorOutput) { vrbCtx = new VectorizedRowBatchCtx(); - vrbCtx.init(hconf, fileKey, (StructObjectInspector) outputObjInspector); + vrbCtx.init(vOutContext.getScratchColumnTypeMap(), (StructObjectInspector) outputObjInspector); outputBatch = vrbCtx.createVectorizedRowBatch(); vectorColumnAssign = VectorColumnAssignFactory.buildAssigners( outputBatch, outputObjInspector, vOutContext.getProjectionColumnMap(), conf.getOutputColumnNames()); Index: ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorGroupKeyHelper.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorGroupKeyHelper.java (revision 1637277) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorGroupKeyHelper.java (working copy) @@ -113,9 +113,9 @@ DecimalColumnVector outputColumnVector = (DecimalColumnVector) outputBatch.cols[keyIndex]; if (inputColumnVector.noNulls || !inputColumnVector.isNull[0]) { - // Since we store references to Decimal128 instances, we must use the update method instead + // Since we store references to HiveDecimalWritable instances, we must use the update method instead // of plain assignment. - outputColumnVector.vector[outputBatch.size].update(inputColumnVector.vector[0]); + outputColumnVector.set(outputBatch.size, inputColumnVector.vector[0]); } else { outputColumnVector.noNulls = false; outputColumnVector.isNull[outputBatch.size] = true; Index: ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorHashKeyWrapper.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorHashKeyWrapper.java (revision 1637277) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorHashKeyWrapper.java (working copy) @@ -20,7 +20,8 @@ import java.util.Arrays; -import org.apache.hadoop.hive.common.type.Decimal128; +import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable; +import org.apache.hadoop.hive.common.type.HiveDecimal; import org.apache.hadoop.hive.ql.exec.KeyWrapper; import org.apache.hadoop.hive.ql.exec.vector.expressions.StringExpr; import org.apache.hadoop.hive.ql.metadata.HiveException; @@ -40,7 +41,7 @@ private static final long[] EMPTY_LONG_ARRAY = new long[0]; private static final double[] EMPTY_DOUBLE_ARRAY = new double[0]; private static final byte[][] EMPTY_BYTES_ARRAY = new byte[0][]; - private static final Decimal128[] EMPTY_DECIMAL_ARRAY = new Decimal128[0]; + private static final HiveDecimalWritable[] EMPTY_DECIMAL_ARRAY = new HiveDecimalWritable[0]; private long[] longValues; private double[] doubleValues; @@ -49,7 +50,7 @@ private int[] byteStarts; private int[] byteLengths; - private Decimal128[] decimalValues; + private HiveDecimalWritable[] decimalValues; private boolean[] isNull; private int hashcode; @@ -58,9 +59,9 @@ int byteValuesCount, int decimalValuesCount) { longValues = longValuesCount > 0 ? new long[longValuesCount] : EMPTY_LONG_ARRAY; doubleValues = doubleValuesCount > 0 ? 
new double[doubleValuesCount] : EMPTY_DOUBLE_ARRAY; - decimalValues = decimalValuesCount > 0 ? new Decimal128[decimalValuesCount] : EMPTY_DECIMAL_ARRAY; + decimalValues = decimalValuesCount > 0 ? new HiveDecimalWritable[decimalValuesCount] : EMPTY_DECIMAL_ARRAY; for(int i = 0; i < decimalValuesCount; ++i) { - decimalValues[i] = new Decimal128(); + decimalValues[i] = new HiveDecimalWritable(HiveDecimal.ZERO); } if (byteValuesCount > 0) { byteValues = new byte[byteValuesCount][]; @@ -87,9 +88,12 @@ public void setHashKey() { hashcode = Arrays.hashCode(longValues) ^ Arrays.hashCode(doubleValues) ^ - Arrays.hashCode(decimalValues) ^ - Arrays.hashCode(isNull); + Arrays.hashCode(isNull); + for (int i = 0; i < decimalValues.length; i++) { hashcode ^= decimalValues[i].getHiveDecimal().hashCode(); } + // This code, with branches and all, is not executed if there are no string keys for (int i = 0; i < byteValues.length; ++i) { /* @@ -161,27 +165,36 @@ } public void duplicateTo(VectorHashKeyWrapper clone) { - clone.longValues = longValues.clone(); - clone.doubleValues = doubleValues.clone(); + clone.longValues = (longValues.length > 0) ? longValues.clone() : EMPTY_LONG_ARRAY; + clone.doubleValues = (doubleValues.length > 0) ? doubleValues.clone() : EMPTY_DOUBLE_ARRAY; clone.isNull = isNull.clone(); - // Decimal128 requires deep clone - clone.decimalValues = new Decimal128[decimalValues.length]; - for(int i = 0; i < decimalValues.length; ++i) { - clone.decimalValues[i] = new Decimal128().update(decimalValues[i]); + if (decimalValues.length > 0) { + // Decimal columns use HiveDecimalWritable. + clone.decimalValues = new HiveDecimalWritable[decimalValues.length]; + for(int i = 0; i < decimalValues.length; ++i) { + clone.decimalValues[i] = new HiveDecimalWritable(decimalValues[i]); + } + } else { + clone.decimalValues = EMPTY_DECIMAL_ARRAY; } - clone.byteValues = new byte[byteValues.length][]; - clone.byteStarts = new int[byteValues.length]; - clone.byteLengths = byteLengths.clone(); - for (int i = 0; i < byteValues.length; ++i) { - // avoid allocation/copy of nulls, because it potentially expensive. branch instead. - if (!isNull[longValues.length + doubleValues.length + i]) { - clone.byteValues[i] = Arrays.copyOfRange( - byteValues[i], - byteStarts[i], - byteStarts[i] + byteLengths[i]); + if (byteLengths.length > 0) { + clone.byteValues = new byte[byteValues.length][]; + clone.byteStarts = new int[byteValues.length]; + clone.byteLengths = byteLengths.clone(); + for (int i = 0; i < byteValues.length; ++i) { + // avoid allocation/copy of nulls, because it is potentially expensive. + // branch instead.
+ if (!isNull[longValues.length + doubleValues.length + i]) { + clone.byteValues[i] = Arrays.copyOfRange(byteValues[i], + byteStarts[i], byteStarts[i] + byteLengths[i]); + } } + } else { + clone.byteValues = EMPTY_BYTES_ARRAY; + clone.byteStarts = EMPTY_INT_ARRAY; + clone.byteLengths = EMPTY_INT_ARRAY; } clone.hashcode = hashcode; assert clone.equals(this); @@ -234,8 +247,8 @@ isNull[longValues.length + doubleValues.length + index] = true; } - public void assignDecimal(int index, Decimal128 value) { - decimalValues[index].update(value); + public void assignDecimal(int index, HiveDecimalWritable value) { + decimalValues[index].set(value); isNull[longValues.length + doubleValues.length + byteValues.length + index] = false; } @@ -299,7 +312,7 @@ return isNull[longValues.length + doubleValues.length + byteValues.length + i]; } - public Decimal128 getDecimal(int i) { + public HiveDecimalWritable getDecimal(int i) { return decimalValues[i]; } } Index: ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorHashKeyWrapperBatch.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorHashKeyWrapperBatch.java (revision 1637277) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorHashKeyWrapperBatch.java (working copy) @@ -580,7 +580,7 @@ } else if (klh.decimalIndex >= 0) { return kw.getIsDecimalNull(klh.decimalIndex)? null : keyOutputWriter.writeValue( - kw.getDecimal(klh.decimalIndex)); + kw.getDecimal(klh.decimalIndex).getHiveDecimal()); } else { throw new HiveException(String.format( Index: ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorMapJoinOperator.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorMapJoinOperator.java (revision 1637277) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorMapJoinOperator.java (working copy) @@ -28,7 +28,6 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.ql.exec.ExprNodeEvaluator; import org.apache.hadoop.hive.ql.exec.MapJoinOperator; -import org.apache.hadoop.hive.ql.exec.Utilities; import org.apache.hadoop.hive.ql.exec.persistence.MapJoinTableContainer.ReusableGetAdaptor; import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpression; import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpressionWriter; @@ -53,20 +52,17 @@ */ private static final long serialVersionUID = 1L; - /** - * Vectorizaiton context key - * Used to retrieve column map from the MapTask scratch - */ - private String fileKey; - private int tagLen; - private VectorExpression[] keyExpressions; - private transient VectorHashKeyWrapperBatch keyWrapperBatch; - private transient VectorExpressionWriter[] keyOutputWriters; private VectorExpression[] bigTableFilterExpressions; private VectorExpression[] bigTableValueExpressions; + + private VectorizationContext vOutContext; + // The above members are initialized by the constructor and must not be + // transient. 
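// --- Illustrative sketch: how setHashKey() above folds decimal keys into the
// wrapper hash. Each element contributes its HiveDecimal's value-based
// hashCode, so wrappers holding equal decimal values hash identically.
// Standalone, hypothetical example:
HiveDecimalWritable[] decimalKeys = { new HiveDecimalWritable(HiveDecimal.create("1.25")) };
int hash = Arrays.hashCode(new long[0]) ^ Arrays.hashCode(new double[0]) ^ Arrays.hashCode(new boolean[0]);
for (int i = 0; i < decimalKeys.length; i++) {
  hash ^= decimalKeys[i].getHiveDecimal().hashCode();
}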
+ //--------------------------------------------------------------------------- + private transient VectorizedRowBatch outputBatch; private transient VectorExpressionWriter[] valueWriters; private transient Map outputVectorAssigners; @@ -76,8 +72,9 @@ // private transient int batchIndex; private transient VectorHashKeyWrapper[] keyValues; - - private transient VectorizationContext vOutContext = null; + private transient VectorHashKeyWrapperBatch keyWrapperBatch; + private transient VectorExpressionWriter[] keyOutputWriters; + private transient VectorizedRowBatchCtx vrbCtx = null; public VectorMapJoinOperator() { @@ -96,7 +93,6 @@ numAliases = desc.getExprs().size(); posBigTable = (byte) desc.getPosBigTable(); filterMaps = desc.getFilterMap(); - tagLen = desc.getTagLength(); noOuterJoin = desc.isNoOuterJoin(); Map> filterExpressions = desc.getFilters(); @@ -113,7 +109,6 @@ // We are making a new output vectorized row batch. vOutContext = new VectorizationContext(desc.getOutputColumnNames()); vOutContext.setFileKey(vContext.getFileKey() + "/MAP_JOIN_" + desc.getBigTableAlias()); - this.fileKey = vOutContext.getFileKey(); } @Override @@ -124,7 +119,7 @@ keyOutputWriters = VectorExpressionWriterFactory.getExpressionWriters(keyDesc); vrbCtx = new VectorizedRowBatchCtx(); - vrbCtx.init(hconf, this.fileKey, (StructObjectInspector) this.outputObjInspector); + vrbCtx.init(vOutContext.getScratchColumnTypeMap(), (StructObjectInspector) this.outputObjInspector); outputBatch = vrbCtx.createVectorizedRowBatch(); @@ -193,10 +188,8 @@ Object[] values = (Object[]) row; VectorColumnAssign[] vcas = outputVectorAssigners.get(outputOI); if (null == vcas) { - Map> allColumnMaps = Utilities.getAllColumnVectorMaps(hconf); - Map columnMap = allColumnMaps.get(fileKey); vcas = VectorColumnAssignFactory.buildAssigners( - outputBatch, outputOI, columnMap, conf.getOutputColumnNames()); + outputBatch, outputOI, vOutContext.getProjectionColumnMap(), conf.getOutputColumnNames()); outputVectorAssigners.put(outputOI, vcas); } for (int i=0; i outputVectorAssigners; @@ -98,7 +99,6 @@ numAliases = desc.getExprs().size(); posBigTable = (byte) desc.getPosBigTable(); filterMaps = desc.getFilterMap(); - tagLen = desc.getTagLength(); noOuterJoin = desc.isNoOuterJoin(); // Must obtain vectorized equivalents for filter and value expressions @@ -117,7 +117,6 @@ // We are making a new output vectorized row batch. 
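// --- Sketch of the initialization flow the vectorized GROUP BY and join
// operators share after this change: the scratch column types come straight
// from the operator's own output VectorizationContext, with no fileKey lookup
// in the Configuration (vOutContext and outputObjInspector are the fields shown above):
vrbCtx = new VectorizedRowBatchCtx();
vrbCtx.init(vOutContext.getScratchColumnTypeMap(), (StructObjectInspector) outputObjInspector);
outputBatch = vrbCtx.createVectorizedRowBatch();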
vOutContext = new VectorizationContext(desc.getOutputColumnNames()); vOutContext.setFileKey(vContext.getFileKey() + "/SMB_JOIN_" + desc.getBigTableAlias()); - this.fileKey = vOutContext.getFileKey(); } @Override @@ -135,7 +134,7 @@ super.initializeOp(hconf); vrbCtx = new VectorizedRowBatchCtx(); - vrbCtx.init(hconf, this.fileKey, (StructObjectInspector) this.outputObjInspector); + vrbCtx.init(vOutContext.getScratchColumnTypeMap(), (StructObjectInspector) this.outputObjInspector); outputBatch = vrbCtx.createVectorizedRowBatch(); @@ -272,10 +271,8 @@ Object[] values = (Object[]) row; VectorColumnAssign[] vcas = outputVectorAssigners.get(outputOI); if (null == vcas) { - Map> allColumnMaps = Utilities.getAllColumnVectorMaps(hconf); - Map columnMap = allColumnMaps.get(fileKey); vcas = VectorColumnAssignFactory.buildAssigners( - outputBatch, outputOI, columnMap, conf.getOutputColumnNames()); + outputBatch, outputOI, vOutContext.getProjectionColumnMap(), conf.getOutputColumnNames()); outputVectorAssigners.put(outputOI, vcas); } for (int i = 0; i < values.length; ++i) { Index: ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java (revision 1637277) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java (working copy) @@ -19,6 +19,7 @@ package org.apache.hadoop.hive.ql.exec.vector; import java.lang.reflect.Constructor; +import java.math.BigDecimal; import java.sql.Date; import java.sql.Timestamp; import java.util.ArrayList; @@ -34,7 +35,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hive.common.type.Decimal128; +import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable; import org.apache.hadoop.hive.common.type.HiveChar; import org.apache.hadoop.hive.common.type.HiveDecimal; import org.apache.hadoop.hive.common.type.HiveVarchar; @@ -868,7 +869,7 @@ case FLOAT_FAMILY: return new ConstantVectorExpression(outCol, ((Number) constantValue).doubleValue()); case DECIMAL: - VectorExpression ve = new ConstantVectorExpression(outCol, (Decimal128) constantValue); + VectorExpression ve = new ConstantVectorExpression(outCol, (HiveDecimal) constantValue); // Set type name with decimal precision, scale, etc. ve.setOutputType(typeName); return ve; @@ -1237,9 +1238,9 @@ ((IDoubleInExpr) expr).setInListValues(inValsD); } else if (isDecimalFamily(colType)) { cl = (mode == Mode.FILTER ? 
FilterDecimalColumnInList.class : DecimalColumnInList.class); - Decimal128[] inValsD = new Decimal128[childrenForInList.size()]; + HiveDecimal[] inValsD = new HiveDecimal[childrenForInList.size()]; for (int i = 0; i != inValsD.length; i++) { - inValsD[i] = (Decimal128) getVectorTypeScalarValue( + inValsD[i] = (HiveDecimal) getVectorTypeScalarValue( (ExprNodeConstantDesc) childrenForInList.get(i)); } expr = createVectorExpression(cl, childExpr.subList(0, 1), Mode.PROJECTION, returnType); @@ -1287,44 +1288,43 @@ return null; } - private Decimal128 castConstantToDecimal(Object scalar, TypeInfo type) throws HiveException { + private HiveDecimal castConstantToDecimal(Object scalar, TypeInfo type) throws HiveException { PrimitiveTypeInfo ptinfo = (PrimitiveTypeInfo) type; + int scale = HiveDecimalUtils.getScaleForType(ptinfo); String typename = type.getTypeName(); - Decimal128 d = new Decimal128(); - int scale = HiveDecimalUtils.getScaleForType(ptinfo); + HiveDecimal rawDecimal; switch (ptinfo.getPrimitiveCategory()) { case FLOAT: - float floatVal = ((Float) scalar).floatValue(); - d.update(floatVal, (short) scale); + rawDecimal = HiveDecimal.create(String.valueOf((Float) scalar)); break; case DOUBLE: - double doubleVal = ((Double) scalar).doubleValue(); - d.update(doubleVal, (short) scale); + rawDecimal = HiveDecimal.create(String.valueOf((Double) scalar)); break; case BYTE: - byte byteVal = ((Byte) scalar).byteValue(); - d.update(byteVal, (short) scale); + rawDecimal = HiveDecimal.create((Byte) scalar); break; case SHORT: - short shortVal = ((Short) scalar).shortValue(); - d.update(shortVal, (short) scale); + rawDecimal = HiveDecimal.create((Short) scalar); break; case INT: - int intVal = ((Integer) scalar).intValue(); - d.update(intVal, (short) scale); + rawDecimal = HiveDecimal.create((Integer) scalar); break; case LONG: - long longVal = ((Long) scalar).longValue(); - d.update(longVal, (short) scale); + rawDecimal = HiveDecimal.create((Long) scalar); break; case DECIMAL: - HiveDecimal decimalVal = (HiveDecimal) scalar; - d.update(decimalVal.unscaledValue(), (short) scale); + rawDecimal = (HiveDecimal) scalar; break; default: - throw new HiveException("Unsupported type "+typename+" for cast to Decimal128"); + throw new HiveException("Unsupported type " + typename + " for cast to HiveDecimal"); } - return d; + if (rawDecimal == null) { + if (LOG.isDebugEnabled()) { + LOG.debug("Casting constant scalar " + scalar + " to HiveDecimal resulted in null"); + } + return null; + } + return rawDecimal; } private String castConstantToString(Object scalar, TypeInfo type) throws HiveException { @@ -1391,7 +1391,7 @@ if (child instanceof ExprNodeConstantDesc) { // Return a constant vector expression Object constantValue = ((ExprNodeConstantDesc) child).getValue(); - Decimal128 decimalValue = castConstantToDecimal(constantValue, child.getTypeInfo()); + HiveDecimal decimalValue = castConstantToDecimal(constantValue, child.getTypeInfo()); return getConstantVectorExpression(decimalValue, returnType, Mode.PROJECTION); } else if (child instanceof ExprNodeNullDesc) { return getConstantVectorExpression(null, returnType, Mode.PROJECTION); @@ -1801,10 +1801,7 @@ return 0; } } else if (decimalTypePattern.matcher(constDesc.getTypeString()).matches()) { - HiveDecimal hd = (HiveDecimal) constDesc.getValue(); - Decimal128 dvalue = new Decimal128(); - dvalue.update(hd.unscaledValue(), (short) hd.scale()); - return dvalue; + return (HiveDecimal) constDesc.getValue(); } else { return constDesc.getValue(); } Index: 
ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizedBatchUtil.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizedBatchUtil.java (revision 1637277) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizedBatchUtil.java (working copy) @@ -481,8 +481,7 @@ if (writableCol != null) { dcv.isNull[rowIndex] = false; HiveDecimalWritable wobj = (HiveDecimalWritable) writableCol; - dcv.vector[rowIndex].update(wobj.getHiveDecimal().unscaledValue(), - (short) wobj.getScale()); + dcv.set(rowIndex, wobj); } else { setNullColIsNullValue(dcv, rowIndex); } } Index: ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizedRowBatchCtx.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizedRowBatchCtx.java (revision 1637277) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizedRowBatchCtx.java (working copy) @@ -34,7 +34,6 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hive.common.type.Decimal128; import org.apache.hadoop.hive.common.type.HiveDecimal; import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants; import org.apache.hadoop.hive.ql.exec.Utilities; @@ -96,7 +95,7 @@ // list does not contain partition columns private List colsToInclude; - private Map columnTypeMap = null; + private Map scratchColumnTypeMap = null; /** * Constructor for VectorizedRowBatchCtx @@ -126,36 +125,17 @@ public VectorizedRowBatchCtx() { } - - /** - * Initializes the VectorizedRowBatch context based on an arbitrary object inspector - * Used by non-tablescan operators when they change the vectorization context - * @param hiveConf - * @param fileKey - * The key on which to retrieve the extra column mapping from the map/reduce scratch - * @param rowOI - * Object inspector that shapes the column types - */ - public void init(Configuration hiveConf, String fileKey, - StructObjectInspector rowOI) { - Map> scratchColumnVectorTypes = - Utilities.getAllScratchColumnVectorTypeMaps(hiveConf); - columnTypeMap = scratchColumnVectorTypes.get(fileKey); - this.rowOI= rowOI; - this.rawRowOI = rowOI; - } - /** * Initializes the VectorizedRowBatch context based on a scratch column type map and * object inspector.
- * @param columnTypeMap + * @param scratchColumnTypeMap * @param rowOI * Object inspector that shapes the column types */ - public void init(Map columnTypeMap, + public void init(Map scratchColumnTypeMap, StructObjectInspector rowOI) { - this.columnTypeMap = columnTypeMap; + this.scratchColumnTypeMap = scratchColumnTypeMap; this.rowOI= rowOI; this.rawRowOI = rowOI; } @@ -179,7 +159,8 @@ IOException, SerDeException, InstantiationException, - IllegalAccessException, HiveException { + IllegalAccessException, + HiveException { Map pathToPartitionInfo = Utilities .getMapRedWork(hiveConf).getMapWork().getPathToPartitionInfo(); @@ -189,8 +170,8 @@ split.getPath(), IOPrepareCache.get().getPartitionDescMap()); String partitionPath = split.getPath().getParent().toString(); - columnTypeMap = Utilities - .getAllScratchColumnVectorTypeMaps(hiveConf) + scratchColumnTypeMap = Utilities + .getMapWorkAllScratchColumnVectorTypeMaps(hiveConf) .get(partitionPath); Properties partProps = @@ -557,7 +538,7 @@ dv.isRepeating = true; } else { HiveDecimal hd = (HiveDecimal) value; - dv.vector[0] = new Decimal128(hd.toString(), (short) hd.scale()); + dv.set(0, hd); dv.isRepeating = true; dv.isNull[0] = false; } @@ -613,12 +594,12 @@ } private void addScratchColumnsToBatch(VectorizedRowBatch vrb) throws HiveException { - if (columnTypeMap != null && !columnTypeMap.isEmpty()) { + if (scratchColumnTypeMap != null && !scratchColumnTypeMap.isEmpty()) { int origNumCols = vrb.numCols; - int newNumCols = vrb.cols.length+columnTypeMap.keySet().size(); + int newNumCols = vrb.cols.length+scratchColumnTypeMap.keySet().size(); vrb.cols = Arrays.copyOf(vrb.cols, newNumCols); for (int i = origNumCols; i < newNumCols; i++) { - String typeName = columnTypeMap.get(i); + String typeName = scratchColumnTypeMap.get(i); if (typeName == null) { throw new HiveException("No type found for column type entry " + i); } Index: ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastDecimalToBoolean.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastDecimalToBoolean.java (revision 1637277) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastDecimalToBoolean.java (working copy) @@ -41,6 +41,6 @@ * Otherwise, return 1 for true. */ protected void func(LongColumnVector outV, DecimalColumnVector inV, int i) { - outV.vector[i] = inV.vector[i].getSignum() == 0 ? 0 : 1; + outV.vector[i] = inV.vector[i].getHiveDecimal().signum() == 0 ? 0 : 1; } } Index: ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastDecimalToDecimal.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastDecimalToDecimal.java (revision 1637277) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastDecimalToDecimal.java (working copy) @@ -52,9 +52,8 @@ * at position i in the respective vectors. */ protected void convert(DecimalColumnVector outV, DecimalColumnVector inV, int i) { - outV.vector[i].update(inV.vector[i]); - outV.vector[i].changeScaleDestructive(outV.scale); - outV.checkPrecisionOverflow(i); + // The set routine enforces precision and scale. 
+ outV.set(i, inV.vector[i]); } /** Index: ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastDecimalToDouble.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastDecimalToDouble.java (revision 1637277) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastDecimalToDouble.java (working copy) @@ -34,6 +34,6 @@ } protected void func(DoubleColumnVector outV, DecimalColumnVector inV, int i) { - outV.vector[i] = inV.vector[i].doubleValue(); + outV.vector[i] = inV.vector[i].getHiveDecimal().doubleValue(); } } Index: ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastDecimalToLong.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastDecimalToLong.java (revision 1637277) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastDecimalToLong.java (working copy) @@ -37,6 +37,6 @@ @Override protected void func(LongColumnVector outV, DecimalColumnVector inV, int i) { - outV.vector[i] = inV.vector[i].longValue(); + outV.vector[i] = inV.vector[i].getHiveDecimal().longValue(); } } Index: ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastDecimalToString.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastDecimalToString.java (revision 1637277) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastDecimalToString.java (working copy) @@ -20,6 +20,7 @@ import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector; import org.apache.hadoop.hive.ql.exec.vector.DecimalColumnVector; +import org.apache.hadoop.hive.ql.exec.vector.expressions.DecimalToStringUnaryUDF; /** * To support vectorized cast of decimal to string. @@ -43,7 +44,7 @@ @Override protected void func(BytesColumnVector outV, DecimalColumnVector inV, int i) { - String s = inV.vector[i].getHiveDecimalString(); + String s = inV.vector[i].getHiveDecimal().toString(); byte[] b = null; try { b = s.getBytes("UTF-8"); Index: ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastDecimalToTimestamp.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastDecimalToTimestamp.java (revision 1637277) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastDecimalToTimestamp.java (working copy) @@ -18,8 +18,7 @@ package org.apache.hadoop.hive.ql.exec.vector.expressions; -import org.apache.hadoop.hive.common.type.Decimal128; -import org.apache.hadoop.hive.common.type.SqlMathUtil; +import org.apache.hadoop.hive.common.type.HiveDecimal; import org.apache.hadoop.hive.ql.exec.vector.DecimalColumnVector; import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector; @@ -31,34 +30,23 @@ public class CastDecimalToTimestamp extends FuncDecimalToLong { private static final long serialVersionUID = 1L; - /* The field tmp is a scratch variable for this operation. It is - * purposely not made static because if this code is ever made multi-threaded, - * each thread will then have its own VectorExpression tree and thus - * its own copy of the variable.
- */ - private transient Decimal128 tmp = null; - private static transient Decimal128 tenE9 = new Decimal128(1000000000); + private static transient HiveDecimal tenE9 = HiveDecimal.create(1000000000); public CastDecimalToTimestamp(int inputColumn, int outputColumn) { super(inputColumn, outputColumn); - tmp = new Decimal128(0); } public CastDecimalToTimestamp() { - - // initialize local field after deserialization - tmp = new Decimal128(0); } @Override protected void func(LongColumnVector outV, DecimalColumnVector inV, int i) { - tmp.update(inV.vector[i]); - - // Reduce scale at most by 9, therefore multiplication will not require rounding. - int newScale = inV.scale > 9 ? (inV.scale - 9) : 0; - tmp.multiplyDestructive(tenE9, (short) newScale); - - // set output - outV.vector[i] = tmp.longValue(); + HiveDecimal result = inV.vector[i].getHiveDecimal().multiply(tenE9); + if (result == null) { + outV.noNulls = false; + outV.isNull[i] = true; + } else { + outV.vector[i] = result.longValue(); + } } } Index: ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastDoubleToDecimal.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastDoubleToDecimal.java (revision 1637277) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastDoubleToDecimal.java (working copy) @@ -18,6 +18,7 @@ package org.apache.hadoop.hive.ql.exec.vector.expressions; +import org.apache.hadoop.hive.common.type.HiveDecimal; import org.apache.hadoop.hive.ql.exec.vector.DecimalColumnVector; import org.apache.hadoop.hive.ql.exec.vector.DoubleColumnVector; @@ -38,7 +39,7 @@ @Override protected void func(DecimalColumnVector outV, DoubleColumnVector inV, int i) { - outV.vector[i].update(inV.vector[i], outV.scale); - outV.checkPrecisionOverflow(i); + String s = ((Double) inV.vector[i]).toString(); + outV.vector[i].set(HiveDecimal.create(s)); } } Index: ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastLongToDecimal.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastLongToDecimal.java (revision 1637277) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastLongToDecimal.java (working copy) @@ -18,6 +18,7 @@ package org.apache.hadoop.hive.ql.exec.vector.expressions; +import org.apache.hadoop.hive.common.type.HiveDecimal; import org.apache.hadoop.hive.ql.exec.vector.DecimalColumnVector; import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector; @@ -40,7 +41,6 @@ @Override protected void func(DecimalColumnVector outV, LongColumnVector inV, int i) { - outV.vector[i].update(inV.vector[i], outV.scale); - outV.checkPrecisionOverflow(i); + outV.vector[i].set(HiveDecimal.create(inV.vector[i])); } } Index: ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastStringToDecimal.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastStringToDecimal.java (revision 1637277) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastStringToDecimal.java (working copy) @@ -18,6 +18,7 @@ package org.apache.hadoop.hive.ql.exec.vector.expressions; +import org.apache.hadoop.hive.common.type.HiveDecimal; import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector; import org.apache.hadoop.hive.ql.exec.vector.DecimalColumnVector; import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor; @@ -58,14 
+59,13 @@ * making a new string. */ s = new String(inV.vector[i], inV.start[i], inV.length[i], "UTF-8"); - outV.vector[i].update(s, outV.scale); + outV.vector[i].set(HiveDecimal.create(s)); } catch (Exception e) { // for any exception in conversion to decimal, produce NULL outV.noNulls = false; outV.isNull[i] = true; } - outV.checkPrecisionOverflow(i); } @Override Index: ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastTimestampToDecimal.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastTimestampToDecimal.java (revision 1637277) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastTimestampToDecimal.java (working copy) @@ -18,6 +18,7 @@ package org.apache.hadoop.hive.ql.exec.vector.expressions; +import org.apache.hadoop.hive.common.type.HiveDecimal; import org.apache.hadoop.hive.ql.exec.vector.DecimalColumnVector; import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector; @@ -39,9 +40,10 @@ @Override protected void func(DecimalColumnVector outV, LongColumnVector inV, int i) { - // the resulting decimal value is 10e-9 * the input long value. - outV.vector[i].updateFixedPoint(inV.vector[i], (short) 9); - outV.vector[i].changeScaleDestructive(outV.scale); - outV.checkPrecisionOverflow(i); + // The input long is a timestamp in nanoseconds; scaling it by 10^-9 yields the value in seconds. + // + HiveDecimal result = HiveDecimal.create(inV.vector[i]); + result = result.scaleByPowerOfTen(-9); + outV.set(i, result); } } Index: ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/ConstantVectorExpression.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/ConstantVectorExpression.java (revision 1637277) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/ConstantVectorExpression.java (working copy) @@ -20,7 +20,7 @@ import java.util.Arrays; -import org.apache.hadoop.hive.common.type.Decimal128; +import org.apache.hadoop.hive.common.type.HiveDecimal; import org.apache.hadoop.hive.common.type.HiveChar; import org.apache.hadoop.hive.common.type.HiveVarchar; import org.apache.hadoop.hive.ql.exec.vector.*; @@ -44,7 +44,7 @@ protected long longValue = 0; private double doubleValue = 0; private byte[] bytesValue = null; - private Decimal128 decimalValue = null; + private HiveDecimal decimalValue = null; private boolean isNullValue = false; private Type type; @@ -85,7 +85,7 @@ setBytesValue(value.getValue().getBytes()); } - public ConstantVectorExpression(int outputColumn, Decimal128 value) { + public ConstantVectorExpression(int outputColumn, HiveDecimal value) { this(outputColumn, "decimal"); setDecimalValue(value); } @@ -137,7 +137,7 @@ dcv.isRepeating = true; dcv.noNulls = !isNullValue; if (!isNullValue) { - dcv.vector[0].update(decimalValue); + dcv.vector[0].set(decimalValue); } else { dcv.isNull[0] = true; } @@ -191,7 +191,7 @@ this.bytesValueLength = bytesValue.length; } - public void setDecimalValue(Decimal128 decimalValue) { + public void setDecimalValue(HiveDecimal decimalValue) { this.decimalValue = decimalValue; } Index: ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/DecimalColumnInList.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/DecimalColumnInList.java (revision 1637277) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/DecimalColumnInList.java
(working copy) @@ -18,11 +18,12 @@ package org.apache.hadoop.hive.ql.exec.vector.expressions; -import org.apache.hadoop.hive.common.type.Decimal128; +import org.apache.hadoop.hive.common.type.HiveDecimal; import org.apache.hadoop.hive.ql.exec.vector.DecimalColumnVector; import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor.Descriptor; import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector; import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch; +import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable; import java.util.HashSet; @@ -32,11 +33,11 @@ public class DecimalColumnInList extends VectorExpression implements IDecimalInExpr { private static final long serialVersionUID = 1L; private int inputCol; - private Decimal128[] inListValues; + private HiveDecimal[] inListValues; private int outputColumn; // The set object containing the IN list. - private transient HashSet<Decimal128> inSet; + private transient HashSet<HiveDecimal> inSet; public DecimalColumnInList() { super(); @@ -60,8 +61,8 @@ } if (inSet == null) { - inSet = new HashSet<Decimal128>(inListValues.length); - for (Decimal128 val : inListValues) { + inSet = new HashSet<HiveDecimal>(inListValues.length); + for (HiveDecimal val : inListValues) { inSet.add(val); } } @@ -72,7 +73,7 @@ boolean[] nullPos = inputColVector.isNull; boolean[] outNulls = outputColVector.isNull; int n = batch.size; - Decimal128[] vector = inputColVector.vector; + HiveDecimalWritable[] vector = inputColVector.vector; long[] outputVector = outputColVector.vector; // return immediately if batch is empty @@ -87,16 +88,16 @@ // All must be selected otherwise size would be zero // Repeating property will not change. - outputVector[0] = inSet.contains(vector[0]) ? 1 : 0; + outputVector[0] = inSet.contains(vector[0].getHiveDecimal()) ? 1 : 0; outputColVector.isRepeating = true; } else if (batch.selectedInUse) { for(int j = 0; j != n; j++) { int i = sel[j]; - outputVector[i] = inSet.contains(vector[i]) ? 1 : 0; + outputVector[i] = inSet.contains(vector[i].getHiveDecimal()) ? 1 : 0; } } else { for(int i = 0; i != n; i++) { - outputVector[i] = inSet.contains(vector[i]) ? 1 : 0; + outputVector[i] = inSet.contains(vector[i].getHiveDecimal()) ? 1 : 0; } } } else { @@ -105,7 +106,7 @@ //All must be selected otherwise size would be zero //Repeating property will not change. if (!nullPos[0]) { - outputVector[0] = inSet.contains(vector[0]) ? 1 : 0; + outputVector[0] = inSet.contains(vector[0].getHiveDecimal()) ? 1 : 0; outNulls[0] = false; } else { outNulls[0] = true; @@ -116,14 +117,14 @@ int i = sel[j]; outNulls[i] = nullPos[i]; if (!nullPos[i]) { - outputVector[i] = inSet.contains(vector[i]) ? 1 : 0; + outputVector[i] = inSet.contains(vector[i].getHiveDecimal()) ? 1 : 0; } } } else { System.arraycopy(nullPos, 0, outNulls, 0, n); for(int i = 0; i != n; i++) { if (!nullPos[i]) { - outputVector[i] = inSet.contains(vector[i]) ? 1 : 0; + outputVector[i] = inSet.contains(vector[i].getHiveDecimal()) ?
1 : 0; } } } @@ -148,11 +149,7 @@ return null; } - public Decimal128[] getInListValues() { - return this.inListValues; - } - - public void setInListValues(Decimal128[] a) { + public void setInListValues(HiveDecimal[] a) { this.inListValues = a; } } Index: ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/DecimalUtil.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/DecimalUtil.java (revision 1637277) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/DecimalUtil.java (working copy) @@ -18,152 +18,343 @@ package org.apache.hadoop.hive.ql.exec.vector.expressions; -import org.apache.hadoop.hive.common.type.Decimal128; import org.apache.hadoop.hive.common.type.HiveDecimal; -import org.apache.hadoop.hive.common.type.SqlMathUtil; -import org.apache.hadoop.hive.common.type.UnsignedInt128; import org.apache.hadoop.hive.ql.exec.vector.DecimalColumnVector; import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector; import org.apache.hadoop.hive.ql.udf.generic.RoundUtils; +import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable; /** * Utility functions for vector operations on decimal values. */ public class DecimalUtil { - public static final Decimal128 DECIMAL_ONE = new Decimal128(); - private static final UnsignedInt128 scratchUInt128 = new UnsignedInt128(); + public static int compare(HiveDecimalWritable writableLeft, HiveDecimal right) { + return writableLeft.getHiveDecimal().compareTo(right); + } - static { - DECIMAL_ONE.update(1L, (short) 0); + public static int compare(HiveDecimal left, HiveDecimalWritable writableRight) { + return left.compareTo(writableRight.getHiveDecimal()); } // Addition with overflow check. Overflow produces NULL output. - public static void addChecked(int i, Decimal128 left, Decimal128 right, + public static void addChecked(int i, HiveDecimal left, HiveDecimal right, DecimalColumnVector outputColVector) { try { - Decimal128.add(left, right, outputColVector.vector[i], outputColVector.scale); - outputColVector.vector[i].checkPrecisionOverflow(outputColVector.precision); + outputColVector.set(i, left.add(right)); } catch (ArithmeticException e) { // catch on overflow outputColVector.noNulls = false; outputColVector.isNull[i] = true; } } + public static void addChecked(int i, HiveDecimalWritable left, HiveDecimalWritable right, + DecimalColumnVector outputColVector) { + try { + outputColVector.set(i, left.getHiveDecimal().add(right.getHiveDecimal())); + } catch (ArithmeticException e) { // catch on overflow + outputColVector.noNulls = false; + outputColVector.isNull[i] = true; + } + } + + public static void addChecked(int i, HiveDecimalWritable left, HiveDecimal right, + DecimalColumnVector outputColVector) { + try { + outputColVector.set(i, left.getHiveDecimal().add(right)); + } catch (ArithmeticException e) { // catch on overflow + outputColVector.noNulls = false; + outputColVector.isNull[i] = true; + } + } + + public static void addChecked(int i, HiveDecimal left, HiveDecimalWritable right, + DecimalColumnVector outputColVector) { + try { + outputColVector.set(i, left.add(right.getHiveDecimal())); + } catch (ArithmeticException e) { // catch on overflow + outputColVector.noNulls = false; + outputColVector.isNull[i] = true; + } + } + // Subtraction with overflow check. Overflow produces NULL output. 
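// --- Condensed sketch of the shape every *Checked helper below shares; the
// Java 8 BinaryOperator is used purely for illustration, while the patch
// spells out each overload by hand for the pre-Java-8 codebase:
static void applyChecked(int i, HiveDecimal left, HiveDecimal right,
    DecimalColumnVector out, java.util.function.BinaryOperator<HiveDecimal> op) {
  try {
    // DecimalColumnVector.set() enforces the output precision and scale.
    out.set(i, op.apply(left, right));
  } catch (ArithmeticException e) {
    // Overflow or divide-by-zero becomes a SQL NULL rather than a query failure.
    out.noNulls = false;
    out.isNull[i] = true;
  }
}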
- public static void subtractChecked(int i, Decimal128 left, Decimal128 right, + public static void subtractChecked(int i, HiveDecimal left, HiveDecimal right, DecimalColumnVector outputColVector) { try { - Decimal128.subtract(left, right, outputColVector.vector[i], outputColVector.scale); - outputColVector.vector[i].checkPrecisionOverflow(outputColVector.precision); + outputColVector.set(i, left.subtract(right)); } catch (ArithmeticException e) { // catch on overflow outputColVector.noNulls = false; outputColVector.isNull[i] = true; } } + public static void subtractChecked(int i, HiveDecimalWritable left, HiveDecimalWritable right, + DecimalColumnVector outputColVector) { + try { + outputColVector.set(i, left.getHiveDecimal().subtract(right.getHiveDecimal())); + } catch (ArithmeticException e) { // catch on overflow + outputColVector.noNulls = false; + outputColVector.isNull[i] = true; + } + } + + public static void subtractChecked(int i, HiveDecimalWritable left, HiveDecimal right, + DecimalColumnVector outputColVector) { + try { + outputColVector.set(i, left.getHiveDecimal().subtract(right)); + } catch (ArithmeticException e) { // catch on overflow + outputColVector.noNulls = false; + outputColVector.isNull[i] = true; + } + } + + public static void subtractChecked(int i, HiveDecimal left, HiveDecimalWritable right, + DecimalColumnVector outputColVector) { + try { + outputColVector.set(i, left.subtract(right.getHiveDecimal())); + } catch (ArithmeticException e) { // catch on overflow + outputColVector.noNulls = false; + outputColVector.isNull[i] = true; + } + } + // Multiplication with overflow check. Overflow produces NULL output. - public static void multiplyChecked(int i, Decimal128 left, Decimal128 right, + public static void multiplyChecked(int i, HiveDecimal left, HiveDecimal right, DecimalColumnVector outputColVector) { try { - Decimal128.multiply(left, right, outputColVector.vector[i], outputColVector.scale); - outputColVector.vector[i].checkPrecisionOverflow(outputColVector.precision); + outputColVector.set(i, left.multiply(right)); } catch (ArithmeticException e) { // catch on overflow outputColVector.noNulls = false; outputColVector.isNull[i] = true; } } + public static void multiplyChecked(int i, HiveDecimalWritable left, HiveDecimalWritable right, + DecimalColumnVector outputColVector) { + try { + outputColVector.set(i, left.getHiveDecimal().multiply(right.getHiveDecimal())); + } catch (ArithmeticException e) { // catch on overflow + outputColVector.noNulls = false; + outputColVector.isNull[i] = true; + } + } + + public static void multiplyChecked(int i, HiveDecimalWritable left, HiveDecimal right, + DecimalColumnVector outputColVector) { + try { + outputColVector.set(i, left.getHiveDecimal().multiply(right)); + } catch (ArithmeticException e) { // catch on overflow + outputColVector.noNulls = false; + outputColVector.isNull[i] = true; + } + } + + public static void multiplyChecked(int i, HiveDecimal left, HiveDecimalWritable right, + DecimalColumnVector outputColVector) { + try { + outputColVector.set(i, left.multiply(right.getHiveDecimal())); + } catch (ArithmeticException e) { // catch on overflow + outputColVector.noNulls = false; + outputColVector.isNull[i] = true; + } + } + // Division with overflow/zero-divide check. Error produces NULL output. 
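// --- Usage sketch: dividing by zero raises ArithmeticException inside
// HiveDecimal.divide, which divideChecked converts into a NULL entry.
// The DecimalColumnVector dimensions (size, precision, scale) are illustrative.
DecimalColumnVector quotients = new DecimalColumnVector(2, 38, 18);
DecimalUtil.divideChecked(0, HiveDecimal.create("1"), HiveDecimal.create("0"), quotients);
// Afterwards: quotients.isNull[0] == true and quotients.noNulls == false.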
- public static void divideChecked(int i, Decimal128 left, Decimal128 right, + public static void divideChecked(int i, HiveDecimal left, HiveDecimal right, DecimalColumnVector outputColVector) { try { - Decimal128.divide(left, right, outputColVector.vector[i], outputColVector.scale); - outputColVector.vector[i].checkPrecisionOverflow(outputColVector.precision); + outputColVector.set(i, left.divide(right)); } catch (ArithmeticException e) { // catch on error outputColVector.noNulls = false; outputColVector.isNull[i] = true; } } + public static void divideChecked(int i, HiveDecimalWritable left, HiveDecimalWritable right, + DecimalColumnVector outputColVector) { + try { + outputColVector.set(i, left.getHiveDecimal().divide(right.getHiveDecimal())); + } catch (ArithmeticException e) { // catch on error + outputColVector.noNulls = false; + outputColVector.isNull[i] = true; + } + } + + public static void divideChecked(int i, HiveDecimalWritable left, HiveDecimal right, + DecimalColumnVector outputColVector) { + try { + outputColVector.set(i, left.getHiveDecimal().divide(right)); + } catch (ArithmeticException e) { // catch on error + outputColVector.noNulls = false; + outputColVector.isNull[i] = true; + } + } + + public static void divideChecked(int i, HiveDecimal left, HiveDecimalWritable right, + DecimalColumnVector outputColVector) { + try { + outputColVector.set(i, left.divide(right.getHiveDecimal())); + } catch (ArithmeticException e) { // catch on error + outputColVector.noNulls = false; + outputColVector.isNull[i] = true; + } + } + // Modulo operator with overflow/zero-divide check. - public static void moduloChecked(int i, Decimal128 left, Decimal128 right, + public static void moduloChecked(int i, HiveDecimal left, HiveDecimal right, DecimalColumnVector outputColVector) { try { - Decimal128.modulo(left, right, outputColVector.vector[i], outputColVector.scale); - outputColVector.vector[i].checkPrecisionOverflow(outputColVector.precision); + outputColVector.set(i, left.remainder(right)); } catch (ArithmeticException e) { // catch on error outputColVector.noNulls = false; outputColVector.isNull[i] = true; } } - public static void floor(int i, Decimal128 input, DecimalColumnVector outputColVector) { + public static void moduloChecked(int i, HiveDecimalWritable left, HiveDecimalWritable right, + DecimalColumnVector outputColVector) { try { - Decimal128 result = outputColVector.vector[i]; - result.update(input); - result.zeroFractionPart(scratchUInt128); - result.changeScaleDestructive(outputColVector.scale); - if ((result.compareTo(input) != 0) && input.getSignum() < 0) { - result.subtractDestructive(DECIMAL_ONE, outputColVector.scale); - } + outputColVector.set(i, left.getHiveDecimal().remainder(right.getHiveDecimal())); + } catch (ArithmeticException e) { // catch on error + outputColVector.noNulls = false; + outputColVector.isNull[i] = true; + } + } + + public static void moduloChecked(int i, HiveDecimalWritable left, HiveDecimal right, + DecimalColumnVector outputColVector) { + try { + outputColVector.set(i, left.getHiveDecimal().remainder(right)); + } catch (ArithmeticException e) { // catch on error + outputColVector.noNulls = false; + outputColVector.isNull[i] = true; + } + } + + public static void moduloChecked(int i, HiveDecimal left, HiveDecimalWritable right, + DecimalColumnVector outputColVector) { + try { + outputColVector.set(i, left.remainder(right.getHiveDecimal())); + } catch (ArithmeticException e) { // catch on error + outputColVector.noNulls = false; + 
outputColVector.isNull[i] = true; + } + } + + public static void floor(int i, HiveDecimal input, DecimalColumnVector outputColVector) { + try { + outputColVector.set(i, input.setScale(0, HiveDecimal.ROUND_FLOOR)); } catch (ArithmeticException e) { outputColVector.noNulls = false; outputColVector.isNull[i] = true; } } - public static void ceiling(int i, Decimal128 input, DecimalColumnVector outputColVector) { + public static void floor(int i, HiveDecimalWritable input, DecimalColumnVector outputColVector) { try { - Decimal128 result = outputColVector.vector[i]; - result.update(input); - result.zeroFractionPart(scratchUInt128); - result.changeScaleDestructive(outputColVector.scale); - if ((result.compareTo(input) != 0) && input.getSignum() > 0) { - result.addDestructive(DECIMAL_ONE, outputColVector.scale); - } + outputColVector.set(i, input.getHiveDecimal().setScale(0, HiveDecimal.ROUND_FLOOR)); } catch (ArithmeticException e) { outputColVector.noNulls = false; outputColVector.isNull[i] = true; } } - public static void round(int i, Decimal128 input, DecimalColumnVector outputColVector) { - HiveDecimal inputHD = HiveDecimal.create(input.toBigDecimal()); - HiveDecimal result = RoundUtils.round(inputHD, outputColVector.scale); - if (result == null) { + public static void ceiling(int i, HiveDecimal input, DecimalColumnVector outputColVector) { + try { + outputColVector.set(i, input.setScale(0, HiveDecimal.ROUND_CEILING)); + } catch (ArithmeticException e) { outputColVector.noNulls = false; outputColVector.isNull[i] = true; - } else { - outputColVector.vector[i].update(result.bigDecimalValue().toPlainString(), outputColVector.scale); } } - public static void sign(int i, Decimal128 input, LongColumnVector outputColVector) { - outputColVector.vector[i] = input.getSignum(); + public static void ceiling(int i, HiveDecimalWritable input, DecimalColumnVector outputColVector) { + try { + outputColVector.set(i, input.getHiveDecimal().setScale(0, HiveDecimal.ROUND_CEILING)); + } catch (ArithmeticException e) { + outputColVector.noNulls = false; + outputColVector.isNull[i] = true; + } } - public static void abs(int i, Decimal128 input, DecimalColumnVector outputColVector) { - Decimal128 result = outputColVector.vector[i]; + public static void round(int i, HiveDecimal input, int decimalPlaces, DecimalColumnVector outputColVector) { try { - result.update(input); - result.absDestructive(); - result.changeScaleDestructive(outputColVector.scale); + outputColVector.set(i, RoundUtils.round(input, decimalPlaces)); } catch (ArithmeticException e) { outputColVector.noNulls = false; outputColVector.isNull[i] = true; } } - public static void negate(int i, Decimal128 input, DecimalColumnVector outputColVector) { - Decimal128 result = outputColVector.vector[i]; + public static void round(int i, HiveDecimalWritable input, int decimalPlaces, DecimalColumnVector outputColVector) { try { - result.update(input); - result.negateDestructive(); - result.changeScaleDestructive(outputColVector.scale); + outputColVector.set(i, RoundUtils.round(input.getHiveDecimal(), decimalPlaces)); } catch (ArithmeticException e) { outputColVector.noNulls = false; outputColVector.isNull[i] = true; } } + + public static void round(int i, HiveDecimal input, DecimalColumnVector outputColVector) { + try { + outputColVector.set(i, RoundUtils.round(input, outputColVector.scale)); + } catch (ArithmeticException e) { + outputColVector.noNulls = false; + outputColVector.isNull[i] = true; + } + } + + public static void round(int i, HiveDecimalWritable 
input, DecimalColumnVector outputColVector) { + try { + outputColVector.set(i, RoundUtils.round(input.getHiveDecimal(), outputColVector.scale)); + } catch (ArithmeticException e) { + outputColVector.noNulls = false; + outputColVector.isNull[i] = true; + } + } + + public static void sign(int i, HiveDecimal input, LongColumnVector outputColVector) { + outputColVector.vector[i] = input.signum(); + } + + public static void sign(int i, HiveDecimalWritable input, LongColumnVector outputColVector) { + outputColVector.vector[i] = input.getHiveDecimal().signum(); + } + + public static void abs(int i, HiveDecimal input, DecimalColumnVector outputColVector) { + try { + outputColVector.set(i, input.abs()); + } catch (ArithmeticException e) { + outputColVector.noNulls = false; + outputColVector.isNull[i] = true; + } + } + + public static void abs(int i, HiveDecimalWritable input, DecimalColumnVector outputColVector) { + try { + outputColVector.set(i, input.getHiveDecimal().abs()); + } catch (ArithmeticException e) { + outputColVector.noNulls = false; + outputColVector.isNull[i] = true; + } + } + + public static void negate(int i, HiveDecimal input, DecimalColumnVector outputColVector) { + try { + outputColVector.set(i, input.negate()); + } catch (ArithmeticException e) { + outputColVector.noNulls = false; + outputColVector.isNull[i] = true; + } + } + + public static void negate(int i, HiveDecimalWritable input, DecimalColumnVector outputColVector) { + try { + outputColVector.set(i, input.getHiveDecimal().negate()); + } catch (ArithmeticException e) { + outputColVector.noNulls = false; + outputColVector.isNull[i] = true; + } + } } Index: ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FilterDecimalColumnInList.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FilterDecimalColumnInList.java (revision 1637277) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FilterDecimalColumnInList.java (working copy) @@ -18,10 +18,11 @@ package org.apache.hadoop.hive.ql.exec.vector.expressions; -import org.apache.hadoop.hive.common.type.Decimal128; +import org.apache.hadoop.hive.common.type.HiveDecimal; import org.apache.hadoop.hive.ql.exec.vector.DecimalColumnVector; import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor.Descriptor; import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch; +import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable; import java.util.HashSet; @@ -31,10 +32,10 @@ public class FilterDecimalColumnInList extends VectorExpression implements IDecimalInExpr { private static final long serialVersionUID = 1L; private int inputCol; - private Decimal128[] inListValues; + private HiveDecimal[] inListValues; // The set object containing the IN list. 
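// --- Sketch of why the probes below call getHiveDecimal() before the set
// lookup: the set holds HiveDecimal, whose equals/hashCode are value based
// (HiveDecimal trims trailing zeros on create), so equivalent literals match.
HashSet<HiveDecimal> in = new HashSet<HiveDecimal>();
in.add(HiveDecimal.create("1.5"));
HiveDecimalWritable probe = new HiveDecimalWritable(HiveDecimal.create("1.50"));
boolean matched = in.contains(probe.getHiveDecimal()); // true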
- private transient HashSet<Decimal128> inSet; + private transient HashSet<HiveDecimal> inSet; public FilterDecimalColumnInList() { super(); @@ -57,8 +58,8 @@ } if (inSet == null) { - inSet = new HashSet<Decimal128>(inListValues.length); - for (Decimal128 val : inListValues) { + inSet = new HashSet<HiveDecimal>(inListValues.length); + for (HiveDecimal val : inListValues) { inSet.add(val); } } @@ -67,7 +68,7 @@ int[] sel = batch.selected; boolean[] nullPos = inputColVector.isNull; int n = batch.size; - Decimal128[] vector = inputColVector.vector; + HiveDecimalWritable[] vector = inputColVector.vector; // return immediately if batch is empty if (n == 0) { @@ -80,7 +81,7 @@ // All must be selected otherwise size would be zero // Repeating property will not change. - if (!(inSet.contains(vector[0]))) { + if (!(inSet.contains(vector[0].getHiveDecimal()))) { //Entire batch is filtered out. batch.size = 0; } @@ -88,7 +89,7 @@ int newSize = 0; for(int j = 0; j != n; j++) { int i = sel[j]; - if (inSet.contains(vector[i])) { + if (inSet.contains(vector[i].getHiveDecimal())) { sel[newSize++] = i; } } @@ -96,7 +97,7 @@ } else { int newSize = 0; for(int i = 0; i != n; i++) { - if (inSet.contains(vector[i])) { + if (inSet.contains(vector[i].getHiveDecimal())) { sel[newSize++] = i; } } @@ -111,7 +112,7 @@ //All must be selected otherwise size would be zero //Repeating property will not change. if (!nullPos[0]) { - if (!inSet.contains(vector[0])) { + if (!inSet.contains(vector[0].getHiveDecimal())) { //Entire batch is filtered out. batch.size = 0; @@ -124,7 +125,7 @@ for(int j = 0; j != n; j++) { int i = sel[j]; if (!nullPos[i]) { - if (inSet.contains(vector[i])) { + if (inSet.contains(vector[i].getHiveDecimal())) { sel[newSize++] = i; } } @@ -136,7 +137,7 @@ int newSize = 0; for(int i = 0; i != n; i++) { if (!nullPos[i]) { - if (inSet.contains(vector[i])) { + if (inSet.contains(vector[i].getHiveDecimal())) { sel[newSize++] = i; } } @@ -167,11 +168,7 @@ return null; } - public Decimal128[] getInListValues() { - return this.inListValues; - } - - public void setInListValues(Decimal128[] a) { + public void setInListValues(HiveDecimal[] a) { this.inListValues = a; } } Index: ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FuncDecimalToLong.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FuncDecimalToLong.java (revision 1637277) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FuncDecimalToLong.java (working copy) @@ -18,7 +18,6 @@ package org.apache.hadoop.hive.ql.exec.vector.expressions; -import org.apache.hadoop.hive.common.type.Decimal128; import org.apache.hadoop.hive.ql.exec.vector.DecimalColumnVector; import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector; import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor; @@ -117,18 +116,6 @@ return outputColumn; } - public void setOutputColumn(int outputColumn) { - this.outputColumn = outputColumn; - } - - public int getInputColumn() { - return inputColumn; - } - - public void setInputColumn(int inputColumn) { - this.inputColumn = inputColumn; - } - @Override public String getOutputType() { return "long"; Index: ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FuncDoubleToDecimal.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FuncDoubleToDecimal.java (revision 1637277) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FuncDoubleToDecimal.java
(working copy) @@ -18,7 +18,6 @@ package org.apache.hadoop.hive.ql.exec.vector.expressions; -import org.apache.hadoop.hive.common.type.Decimal128; import org.apache.hadoop.hive.ql.exec.vector.DecimalColumnVector; import org.apache.hadoop.hive.ql.exec.vector.DoubleColumnVector; import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor; Index: ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FuncLongToDecimal.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FuncLongToDecimal.java (revision 1637277) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FuncLongToDecimal.java (working copy) @@ -18,7 +18,6 @@ package org.apache.hadoop.hive.ql.exec.vector.expressions; -import org.apache.hadoop.hive.common.type.Decimal128; import org.apache.hadoop.hive.ql.exec.vector.DecimalColumnVector; import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector; import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor; Index: ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FuncRoundWithNumDigitsDecimalToDecimal.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FuncRoundWithNumDigitsDecimalToDecimal.java (revision 1637277) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FuncRoundWithNumDigitsDecimalToDecimal.java (working copy) @@ -21,9 +21,9 @@ import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpression; import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch; import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor; -import org.apache.hadoop.hive.common.type.Decimal128; import org.apache.hadoop.hive.ql.exec.vector.DecimalColumnVector; import org.apache.hadoop.hive.ql.exec.vector.expressions.DecimalUtil; +import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable; import java.util.Arrays; @@ -61,7 +61,7 @@ boolean[] outputIsNull = outputColVector.isNull; outputColVector.noNulls = inputColVector.noNulls; int n = batch.size; - Decimal128[] vector = inputColVector.vector; + HiveDecimalWritable[] vector = inputColVector.vector; // return immediately if batch is empty if (n == 0) { @@ -73,7 +73,7 @@ // All must be selected otherwise size would be zero // Repeating property will not change. outputIsNull[0] = inputIsNull[0]; - DecimalUtil.round(0, vector[0], outputColVector); + DecimalUtil.round(0, vector[0], decimalPlaces, outputColVector); outputColVector.isRepeating = true; } else if (inputColVector.noNulls) { if (batch.selectedInUse) { @@ -82,14 +82,14 @@ // Set isNull because decimal operation can yield a null. outputIsNull[i] = false; - DecimalUtil.round(i, vector[i], outputColVector); + DecimalUtil.round(i, vector[i], decimalPlaces, outputColVector); } } else { // Set isNull because decimal operation can yield a null. 
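// --- Usage sketch of the explicit-digits round() overload wired in above; the
// column vector's declared precision/scale (38, 18) are illustrative:
DecimalColumnVector rounded = new DecimalColumnVector(1, 38, 18);
DecimalUtil.round(0, HiveDecimal.create("3.14159"), 2, rounded); // row 0 becomes 3.14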
Arrays.fill(outputIsNull, 0, n, false); for(int i = 0; i != n; i++) { - DecimalUtil.round(i, vector[i], outputColVector); + DecimalUtil.round(i, vector[i], decimalPlaces, outputColVector); } } outputColVector.isRepeating = false; @@ -98,12 +98,12 @@ for(int j = 0; j != n; j++) { int i = sel[j]; outputIsNull[i] = inputIsNull[i]; - DecimalUtil.round(i, vector[i], outputColVector); + DecimalUtil.round(i, vector[i], decimalPlaces, outputColVector); } } else { System.arraycopy(inputIsNull, 0, outputIsNull, 0, n); for(int i = 0; i != n; i++) { - DecimalUtil.round(i, vector[i], outputColVector); + DecimalUtil.round(i, vector[i], decimalPlaces, outputColVector); } } outputColVector.isRepeating = false; @@ -119,28 +119,7 @@ public String getOutputType() { return outputType; } - - public int getColNum() { - return colNum; - } - public void setColNum(int colNum) { - this.colNum = colNum; - } - - public void setOutputColumn(int outputColumn) { - this.outputColumn = outputColumn; - } - - public int getDecimalPlaces() { - return decimalPlaces; - } - - public void setDecimalPlaces(int decimalPlaces) { - this.decimalPlaces = decimalPlaces; - } - - @Override public VectorExpressionDescriptor.Descriptor getDescriptor() { return (new VectorExpressionDescriptor.Builder()) Index: ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/IDecimalInExpr.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/IDecimalInExpr.java (revision 1637277) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/IDecimalInExpr.java (working copy) @@ -18,8 +18,8 @@ package org.apache.hadoop.hive.ql.exec.vector.expressions; -import org.apache.hadoop.hive.common.type.Decimal128; +import org.apache.hadoop.hive.common.type.HiveDecimal; public interface IDecimalInExpr { - void setInListValues(Decimal128[] inVals); + void setInListValues(HiveDecimal[] inVals); } Index: ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/NullUtil.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/NullUtil.java (revision 1637277) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/NullUtil.java (working copy) @@ -300,18 +300,18 @@ if (v.noNulls) { return; } else if (v.isRepeating && v.isNull[0]) { - v.vector[0].setNullDataValue(); + v.setNullDataValue(0); } else if (selectedInUse) { for (int j = 0; j != n; j++) { int i = sel[j]; if(v.isNull[i]) { - v.vector[i].setNullDataValue(); + v.setNullDataValue(i); } } } else { for (int i = 0; i != n; i++) { if(v.isNull[i]) { - v.vector[i].setNullDataValue(); + v.setNullDataValue(i); } } } Index: ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/VectorExpressionWriter.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/VectorExpressionWriter.java (revision 1637277) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/VectorExpressionWriter.java (working copy) @@ -18,9 +18,10 @@ package org.apache.hadoop.hive.ql.exec.vector.expressions; -import org.apache.hadoop.hive.common.type.Decimal128; +import org.apache.hadoop.hive.common.type.HiveDecimal; import org.apache.hadoop.hive.ql.exec.vector.ColumnVector; import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; import 
org.apache.hadoop.io.Writable; @@ -34,7 +35,8 @@ Object writeValue(long value) throws HiveException; Object writeValue(double value) throws HiveException; Object writeValue(byte[] value, int start, int length) throws HiveException; - Object writeValue(Decimal128 value) throws HiveException; + Object writeValue(HiveDecimalWritable value) throws HiveException; + Object writeValue(HiveDecimal value) throws HiveException; Object setValue(Object row, ColumnVector column, int columnRow) throws HiveException; Object initValue(Object ost) throws HiveException; } Index: ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/VectorExpressionWriterFactory.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/VectorExpressionWriterFactory.java (revision 1637277) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/VectorExpressionWriterFactory.java (working copy) @@ -18,7 +18,6 @@ package org.apache.hadoop.hive.ql.exec.vector.expressions; -import java.math.BigDecimal; import java.sql.Date; import java.sql.Timestamp; import java.util.ArrayList; @@ -27,7 +26,6 @@ import org.apache.commons.lang.ArrayUtils; import org.apache.commons.lang.StringUtils; -import org.apache.hadoop.hive.common.type.Decimal128; import org.apache.hadoop.hive.common.type.HiveChar; import org.apache.hadoop.hive.common.type.HiveDecimal; import org.apache.hadoop.hive.common.type.HiveVarchar; @@ -134,16 +132,31 @@ * The base implementation must be overridden by the Decimal specialization */ @Override - public Object writeValue(Decimal128 value) throws HiveException { + public Object writeValue(HiveDecimal value) throws HiveException { throw new HiveException("Internal error: should not reach here"); } /** * The base implementation must be overridden by the Decimal specialization */ - public Object setValue(Object field, Decimal128 value) throws HiveException { + @Override + public Object writeValue(HiveDecimalWritable value) throws HiveException { throw new HiveException("Internal error: should not reach here"); } + + /** + * The base implementation must be overridden by the Decimal specialization + */ + public Object setValue(Object field, HiveDecimalWritable value) throws HiveException { + throw new HiveException("Internal error: should not reach here"); + } + + /** + * The base implementation must be overridden by the Decimal specialization + */ + public Object setValue(Object field, HiveDecimal value) throws HiveException { + throw new HiveException("Internal error: should not reach here"); + } } /** @@ -465,24 +478,35 @@ } @Override - public Object writeValue(Decimal128 value) throws HiveException { - return ((SettableHiveDecimalObjectInspector) this.objectInspector).set(obj, - HiveDecimal.create(value.toBigDecimal())); + public Object writeValue(HiveDecimalWritable value) throws HiveException { + return ((SettableHiveDecimalObjectInspector) this.objectInspector).set(obj, value); } @Override - public Object setValue(Object field, Decimal128 value) { + public Object writeValue(HiveDecimal value) throws HiveException { + return ((SettableHiveDecimalObjectInspector) this.objectInspector).set(obj, value); + } + + @Override + public Object setValue(Object field, HiveDecimalWritable value) { if (null == field) { field = initValue(null); } - return ((SettableHiveDecimalObjectInspector) this.objectInspector).set(field, - HiveDecimal.create(value.toBigDecimal())); + return ((SettableHiveDecimalObjectInspector) 
this.objectInspector).set(field, value); } @Override + public Object setValue(Object field, HiveDecimal value) { + if (null == field) { + field = initValue(null); + } + return ((SettableHiveDecimalObjectInspector) this.objectInspector).set(field, value); + } + + @Override public Object initValue(Object ignored) { return ((SettableHiveDecimalObjectInspector) this.objectInspector).create( - HiveDecimal.create(BigDecimal.ZERO)); + HiveDecimal.ZERO); } }.init(fieldObjInspector); } Index: ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFAvgDecimal.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFAvgDecimal.java (revision 1637277) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFAvgDecimal.java (working copy) @@ -21,9 +21,9 @@ import java.util.ArrayList; import java.util.List; -import org.apache.hadoop.hive.common.type.Decimal128; import org.apache.hadoop.hive.common.type.HiveDecimal; import org.apache.hadoop.hive.ql.exec.Description; +import org.apache.hadoop.hive.ql.exec.vector.expressions.DecimalUtil; import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpression; import org.apache.hadoop.hive.ql.exec.vector.expressions.aggregates.VectorAggregateExpression; import org.apache.hadoop.hive.ql.exec.vector.VectorAggregationBufferRow; @@ -41,7 +41,6 @@ import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector; import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory; import org.apache.hadoop.hive.serde2.typeinfo.DecimalTypeInfo; -import org.apache.hive.common.util.Decimal128FastBuffer; /** * Generated from template VectorUDAFAvg.txt. @@ -57,24 +56,45 @@ private static final long serialVersionUID = 1L; - transient private final Decimal128 sum = new Decimal128(); + transient private final HiveDecimalWritable sum = new HiveDecimalWritable(); transient private long count; transient private boolean isNull; - public void sumValueWithCheck(Decimal128 value, short scale) { + // We use this to catch overflow. 
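
Note on the Aggregation buffer that follows: it drops Decimal128's destructive updates in favor of immutable HiveDecimal arithmetic, with an isOutOfRange flag that poisons the buffer once an overflow is seen. Condensed into a standalone sketch (assuming, as the patch's catch blocks do, that HiveDecimal.add signals overflow with ArithmeticException):

import org.apache.hadoop.hive.common.type.HiveDecimal;
import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable;

final class DecimalAvgBufferSketch {
  private final HiveDecimalWritable sum = new HiveDecimalWritable();
  private long count;
  private boolean isNull = true;
  private boolean isOutOfRange;

  void add(HiveDecimalWritable writable) {
    if (isOutOfRange) {
      return;                                    // stay poisoned; the AVG becomes NULL
    }
    HiveDecimal value = writable.getHiveDecimal();
    if (isNull) {
      sum.set(value);                            // first non-null value
      count = 1;
      isNull = false;
      return;
    }
    try {
      sum.set(sum.getHiveDecimal().add(value));  // immutable add, no scale mutation
      count++;
    } catch (ArithmeticException e) {
      isOutOfRange = true;                       // overflow detected
    }
  }
}
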
+ transient private boolean isOutOfRange; + + public void sumValueWithNullCheck(HiveDecimalWritable writable, short scale) { + if (isOutOfRange) { + return; + } + HiveDecimal value = writable.getHiveDecimal(); if (isNull) { - sum.update(value); - sum.changeScaleDestructive(scale); + sum.set(value); count = 1; isNull = false; } else { - sum.addDestructive(value, scale); + HiveDecimal result; + try { + result = sum.getHiveDecimal().add(value); + } catch (ArithmeticException e) { // catch on overflow + isOutOfRange = true; + return; + } + sum.set(result); count++; } } - public void sumValueNoCheck(Decimal128 value, short scale) { - sum.addDestructive(value, scale); + public void sumValueNoNullCheck(HiveDecimalWritable writable, short scale) { + HiveDecimal value = writable.getHiveDecimal(); + HiveDecimal result; + try { + result = sum.getHiveDecimal().add(value); + } catch (ArithmeticException e) { // catch on overflow + isOutOfRange = true; + return; + } + sum.set(result); count++; } @@ -87,7 +107,8 @@ @Override public void reset() { isNull = true; - sum.zeroClear(); + isOutOfRange = false; + sum.set(HiveDecimal.ZERO); count = 0L; } } @@ -98,8 +119,6 @@ transient private HiveDecimalWritable resultSum; transient private StructObjectInspector soi; - transient private final Decimal128FastBuffer scratch; - /** * The scale of the SUM in the partial output */ @@ -120,12 +139,6 @@ */ private short inputPrecision; - /** - * A value used as scratch to avoid allocating at runtime. - * Needed by computations like vector[0] * batchSize - */ - transient private Decimal128 scratchDecimal = new Decimal128(); - public VectorUDAFAvgDecimal(VectorExpression inputExpression) { this(); this.inputExpression = inputExpression; @@ -138,7 +151,6 @@ resultSum = new HiveDecimalWritable(); partialResult[0] = resultCount; partialResult[1] = resultSum; - scratch = new Decimal128FastBuffer(); } @@ -185,7 +197,7 @@ DecimalColumnVector inputVector = ( DecimalColumnVector)batch. 
cols[this.inputExpression.getOutputColumn()]; - Decimal128[] vector = inputVector.vector; + HiveDecimalWritable[] vector = inputVector.vector; if (inputVector.noNulls) { if (inputVector.isRepeating) { @@ -231,7 +243,7 @@ private void iterateNoNullsRepeatingWithAggregationSelection( VectorAggregationBufferRow[] aggregationBufferSets, int bufferIndex, - Decimal128 value, + HiveDecimalWritable value, int batchSize) { for (int i=0; i < batchSize; ++i) { @@ -239,14 +251,14 @@ aggregationBufferSets, bufferIndex, i); - myagg.sumValueWithCheck(value, this.sumScale); + myagg.sumValueWithNullCheck(value, this.sumScale); } } private void iterateNoNullsSelectionWithAggregationSelection( VectorAggregationBufferRow[] aggregationBufferSets, int bufferIndex, - Decimal128[] values, + HiveDecimalWritable[] values, int[] selection, int batchSize) { @@ -255,28 +267,28 @@ aggregationBufferSets, bufferIndex, i); - myagg.sumValueWithCheck(values[selection[i]], this.sumScale); + myagg.sumValueWithNullCheck(values[selection[i]], this.sumScale); } } private void iterateNoNullsWithAggregationSelection( VectorAggregationBufferRow[] aggregationBufferSets, int bufferIndex, - Decimal128[] values, + HiveDecimalWritable[] values, int batchSize) { for (int i=0; i < batchSize; ++i) { Aggregation myagg = getCurrentAggregationBuffer( aggregationBufferSets, bufferIndex, i); - myagg.sumValueWithCheck(values[i], this.sumScale); + myagg.sumValueWithNullCheck(values[i], this.sumScale); } } private void iterateHasNullsRepeatingSelectionWithAggregationSelection( VectorAggregationBufferRow[] aggregationBufferSets, int bufferIndex, - Decimal128 value, + HiveDecimalWritable value, int batchSize, int[] selection, boolean[] isNull) { @@ -287,7 +299,7 @@ aggregationBufferSets, bufferIndex, i); - myagg.sumValueWithCheck(value, this.sumScale); + myagg.sumValueWithNullCheck(value, this.sumScale); } } @@ -296,7 +308,7 @@ private void iterateHasNullsRepeatingWithAggregationSelection( VectorAggregationBufferRow[] aggregationBufferSets, int bufferIndex, - Decimal128 value, + HiveDecimalWritable value, int batchSize, boolean[] isNull) { @@ -306,7 +318,7 @@ aggregationBufferSets, bufferIndex, i); - myagg.sumValueWithCheck(value, this.sumScale); + myagg.sumValueWithNullCheck(value, this.sumScale); } } } @@ -314,7 +326,7 @@ private void iterateHasNullsSelectionWithAggregationSelection( VectorAggregationBufferRow[] aggregationBufferSets, int bufferIndex, - Decimal128[] values, + HiveDecimalWritable[] values, int batchSize, int[] selection, boolean[] isNull) { @@ -326,7 +338,7 @@ aggregationBufferSets, bufferIndex, j); - myagg.sumValueWithCheck(values[i], this.sumScale); + myagg.sumValueWithNullCheck(values[i], this.sumScale); } } } @@ -334,7 +346,7 @@ private void iterateHasNullsWithAggregationSelection( VectorAggregationBufferRow[] aggregationBufferSets, int bufferIndex, - Decimal128[] values, + HiveDecimalWritable[] values, int batchSize, boolean[] isNull) { @@ -344,7 +356,7 @@ aggregationBufferSets, bufferIndex, i); - myagg.sumValueWithCheck(values[i], this.sumScale); + myagg.sumValueWithNullCheck(values[i], this.sumScale); } } } @@ -367,18 +379,31 @@ Aggregation myagg = (Aggregation)agg; - Decimal128[] vector = inputVector.vector; + HiveDecimalWritable[] vector = inputVector.vector; if (inputVector.isRepeating) { if (inputVector.noNulls) { if (myagg.isNull) { myagg.isNull = false; - myagg.sum.zeroClear(); + myagg.sum.set(HiveDecimal.ZERO); myagg.count = 0; } - scratchDecimal.update(batchSize); - scratchDecimal.multiplyDestructive(vector[0], 
vector[0].getScale()); - myagg.sum.update(scratchDecimal); + HiveDecimal value = vector[0].getHiveDecimal(); + HiveDecimal multiple; + try { + multiple = value.multiply(HiveDecimal.create(batchSize)); + } catch (ArithmeticException e) { // catch on overflow + myagg.isOutOfRange = true; + return; + } + HiveDecimal result; + try { + result = myagg.sum.getHiveDecimal().add(multiple); + } catch (ArithmeticException e) { // catch on overflow + myagg.isOutOfRange = true; + return; + } + myagg.sum.set(result); myagg.count += batchSize; } return; @@ -400,7 +425,7 @@ private void iterateSelectionHasNulls( Aggregation myagg, - Decimal128[] vector, + HiveDecimalWritable[] vector, int batchSize, boolean[] isNull, int[] selected) { @@ -408,57 +433,57 @@ for (int j=0; j< batchSize; ++j) { int i = selected[j]; if (!isNull[i]) { - Decimal128 value = vector[i]; - myagg.sumValueWithCheck(value, this.sumScale); + HiveDecimalWritable value = vector[i]; + myagg.sumValueWithNullCheck(value, this.sumScale); } } } private void iterateSelectionNoNulls( Aggregation myagg, - Decimal128[] vector, + HiveDecimalWritable[] vector, int batchSize, int[] selected) { if (myagg.isNull) { myagg.isNull = false; - myagg.sum.zeroClear(); + myagg.sum.set(HiveDecimal.ZERO); myagg.count = 0; } for (int i=0; i< batchSize; ++i) { - Decimal128 value = vector[selected[i]]; - myagg.sumValueNoCheck(value, this.sumScale); + HiveDecimalWritable value = vector[selected[i]]; + myagg.sumValueNoNullCheck(value, this.sumScale); } } private void iterateNoSelectionHasNulls( Aggregation myagg, - Decimal128[] vector, + HiveDecimalWritable[] vector, int batchSize, boolean[] isNull) { for(int i=0;i tRealOutputFormat = new ThreadLocal() { + @Override + protected String initialValue() { + return null; + } + }; @SuppressWarnings("unchecked") private static Map, Class> @@ -105,11 +110,9 @@ } Class result = outputFormatSubstituteMap .get(origin); - //register this output format into the map for the first time - if ((storagehandlerflag == true) && (result == null)) { + if ((storagehandlerflag == true) && (result == null || result == HivePassThroughOutputFormat.class)) { HiveFileFormatUtils.setRealOutputFormatClassName(origin.getName()); result = HivePassThroughOutputFormat.class; - HiveFileFormatUtils.registerOutputFormatSubstitute((Class) origin,HivePassThroughOutputFormat.class); } return result; } @@ -120,7 +123,7 @@ @SuppressWarnings("unchecked") public static String getRealOutputFormatClassName() { - return realoutputFormat; + return tRealOutputFormat.get(); } /** @@ -129,7 +132,7 @@ public static void setRealOutputFormatClassName( String destination) { if (destination != null){ - realoutputFormat = destination; + tRealOutputFormat.set(destination); } else { return; Index: ql/src/java/org/apache/hadoop/hive/ql/io/avro/AvroContainerOutputFormat.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/io/avro/AvroContainerOutputFormat.java (revision 1637277) +++ ql/src/java/org/apache/hadoop/hive/ql/io/avro/AvroContainerOutputFormat.java (working copy) @@ -30,14 +30,17 @@ import org.apache.avro.file.DataFileWriter; import org.apache.avro.generic.GenericDatumWriter; import org.apache.avro.generic.GenericRecord; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.ql.exec.FileSinkOperator; import org.apache.hadoop.hive.ql.io.HiveOutputFormat; import 
org.apache.hadoop.hive.serde2.avro.AvroGenericRecordWritable; import org.apache.hadoop.hive.serde2.avro.AvroSerdeException; import org.apache.hadoop.hive.serde2.avro.AvroSerdeUtils; -import org.apache.hadoop.io.LongWritable; import org.apache.hadoop.io.Writable; +import org.apache.hadoop.io.WritableComparable; import org.apache.hadoop.mapred.JobConf; import org.apache.hadoop.mapred.RecordWriter; import org.apache.hadoop.mapred.Reporter; @@ -47,8 +50,10 @@ * Write to an Avro file from a Hive process. */ public class AvroContainerOutputFormat - implements HiveOutputFormat { + implements HiveOutputFormat { + public static final Log LOG = LogFactory.getLog(AvroContainerOutputFormat.class); + @Override public org.apache.hadoop.hive.ql.exec.FileSinkOperator.RecordWriter getHiveRecordWriter(JobConf jobConf, Path path, Class valueClass, boolean isCompressed, @@ -75,24 +80,65 @@ return new AvroGenericRecordWriter(dfw); } - //no records will be emitted from Hive - @Override - public RecordWriter - getRecordWriter(FileSystem ignored, JobConf job, String name, - Progressable progress) { - return new RecordWriter() { - @Override - public void write(LongWritable key, AvroGenericRecordWritable value) { - throw new RuntimeException("Should not be called"); + class WrapperRecordWriter implements RecordWriter { + FileSinkOperator.RecordWriter hiveWriter = null; + JobConf jobConf; + Progressable progressable; + String fileName; + + public WrapperRecordWriter(JobConf jobConf, Progressable progressable, String fileName){ + this.progressable = progressable; + this.jobConf = jobConf; + this.fileName = fileName; + } + + private FileSinkOperator.RecordWriter getHiveWriter() throws IOException { + if (this.hiveWriter == null){ + Properties properties = new Properties(); + for (AvroSerdeUtils.AvroTableProperties tableProperty : AvroSerdeUtils.AvroTableProperties.values()){ + String propVal; + if((propVal = jobConf.get(tableProperty.getPropName())) != null){ + properties.put(tableProperty.getPropName(),propVal); + } + } + + Boolean isCompressed = jobConf.getBoolean("mapreduce.output.fileoutputformat.compress", false); + Path path = new Path(this.fileName); + if(path.getFileSystem(jobConf).isDirectory(path)){ + // This path is only potentially encountered during setup + // Otherwise, a specific part_xxxx file name is generated and passed in. + path = new Path(path,"_dummy"); + } + + this.hiveWriter = getHiveRecordWriter(jobConf,path,null,isCompressed, properties, progressable); } + return this.hiveWriter; + } - @Override - public void close(Reporter reporter) { - } - }; + @Override + public void write(K key, V value) throws IOException { + getHiveWriter().write(value); + } + + @Override + public void close(Reporter reporter) throws IOException { + // Normally, I'd worry about the blanket false being passed in here, and that + // it'd need to be integrated into an abort call for an OutputCommitter, but the + // underlying recordwriter ignores it and throws it away, so it's irrelevant. 
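
For context, a hypothetical caller of this wrapper through the plain mapred API, a path the old stub rejected with "Should not be called". Everything here beyond the AvroContainerOutputFormat API itself is illustrative: the output path is made up, and jobConf must already carry the Avro schema table properties or the lazily built Hive writer has no schema to work with.

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hive.ql.io.avro.AvroContainerOutputFormat;
import org.apache.hadoop.hive.serde2.avro.AvroGenericRecordWritable;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.RecordWriter;
import org.apache.hadoop.mapred.Reporter;

final class AvroWrapperUsageSketch {
  static void writeOne(JobConf jobConf, AvroGenericRecordWritable record) throws Exception {
    AvroContainerOutputFormat format = new AvroContainerOutputFormat();
    RecordWriter<WritableComparable, AvroGenericRecordWritable> writer =
        format.getRecordWriter(FileSystem.get(jobConf), jobConf,
            "/tmp/avro_out/part-00000", Reporter.NULL);  // hypothetical file name
    writer.write(null, record);   // key is ignored; value goes to the lazily built Hive writer
    writer.close(Reporter.NULL);  // forwards to FileSinkOperator.RecordWriter.close(false)
  }
}
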
+ getHiveWriter().close(false); + } + } + //no records will be emitted from Hive @Override + public RecordWriter + getRecordWriter(FileSystem ignored, JobConf job, String fileName, + Progressable progress) throws IOException { + return new WrapperRecordWriter(job,progress,fileName); + } + + @Override public void checkOutputSpecs(FileSystem ignored, JobConf job) throws IOException { return; // Not doing any check } Index: ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderImpl.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderImpl.java (revision 1637277) +++ ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderImpl.java (working copy) @@ -1259,12 +1259,9 @@ if (!result.isNull[0]) { BigInteger bInt = SerializationUtils.readBigInteger(valueStream); short scaleInData = (short) scaleStream.next(); - result.vector[0].update(bInt, scaleInData); - - // Change the scale to match the schema if the scale in data is different. - if (scale != scaleInData) { - result.vector[0].changeScaleDestructive((short) scale); - } + HiveDecimal dec = HiveDecimal.create(bInt, scaleInData); + dec = HiveDecimalUtils.enforcePrecisionScale(dec, precision, scale); + result.set(0, dec); } } else { // result vector has isNull values set, use the same to read scale vector. @@ -1273,13 +1270,10 @@ for (int i = 0; i < batchSize; i++) { if (!result.isNull[i]) { BigInteger bInt = SerializationUtils.readBigInteger(valueStream); - result.vector[i].update(bInt, (short) scratchScaleVector.vector[i]); - - // Change the scale to match the schema if the scale is less than in data. - // (HIVE-7373) If scale is bigger, then it leaves the original trailing zeros - if (scale < scratchScaleVector.vector[i]) { - result.vector[i].changeScaleDestructive((short) scale); - } + short scaleInData = (short) scratchScaleVector.vector[i]; + HiveDecimal dec = HiveDecimal.create(bInt, scaleInData); + dec = HiveDecimalUtils.enforcePrecisionScale(dec, precision, scale); + result.set(i, dec); } } } Index: ql/src/java/org/apache/hadoop/hive/ql/log/PerfLogger.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/log/PerfLogger.java (revision 1637277) +++ ql/src/java/org/apache/hadoop/hive/ql/log/PerfLogger.java (working copy) @@ -18,14 +18,14 @@ package org.apache.hadoop.hive.ql.log; -import java.util.HashMap; -import java.util.Map; - import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hive.ql.QueryPlan; import org.apache.hadoop.hive.ql.session.SessionState; +import java.util.HashMap; +import java.util.Map; + /** * PerfLogger. 
* @@ -147,10 +147,37 @@ } public Long getStartTime(String method) { - return startTimes.get(method); + long startTime = 0L; + + if (startTimes.containsKey(method)) { + startTime = startTimes.get(method); + } + return startTime; } public Long getEndTime(String method) { - return endTimes.get(method); + long endTime = 0L; + + if (endTimes.containsKey(method)) { + endTime = endTimes.get(method); + } + return endTime; } + + public boolean startTimeHasMethod(String method) { + return startTimes.containsKey(method); + } + + public boolean endTimeHasMethod(String method) { + return endTimes.containsKey(method); + } + + public Long getDuration(String method) { + long duration = 0; + if (startTimes.containsKey(method) && endTimes.containsKey(method)) { + duration = endTimes.get(method) - startTimes.get(method); + } + return duration; + } + } Index: ql/src/java/org/apache/hadoop/hive/ql/optimizer/ColumnPrunerProcFactory.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/ColumnPrunerProcFactory.java (revision 1637277) +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/ColumnPrunerProcFactory.java (working copy) @@ -18,16 +18,6 @@ package org.apache.hadoop.hive.ql.optimizer; -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.Stack; - import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hive.ql.exec.AbstractMapJoinOperator; @@ -39,7 +29,6 @@ import org.apache.hadoop.hive.ql.exec.LateralViewForwardOperator; import org.apache.hadoop.hive.ql.exec.LateralViewJoinOperator; import org.apache.hadoop.hive.ql.exec.LimitOperator; -import org.apache.hadoop.hive.ql.exec.MapJoinOperator; import org.apache.hadoop.hive.ql.exec.Operator; import org.apache.hadoop.hive.ql.exec.OperatorFactory; import org.apache.hadoop.hive.ql.exec.PTFOperator; @@ -76,6 +65,16 @@ import org.apache.hadoop.hive.serde2.objectinspector.StructField; import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.Stack; + /** * Factory for generating the different node processors used by ColumnPruner. 
*/ @@ -600,8 +599,7 @@ // revert output cols of SEL(*) to ExprNodeColumnDesc String[] tabcol = rr.reverseLookup(col); ColumnInfo colInfo = rr.get(tabcol[0], tabcol[1]); - ExprNodeColumnDesc colExpr = new ExprNodeColumnDesc(colInfo.getType(), - colInfo.getInternalName(), colInfo.getTabAlias(), colInfo.getIsVirtualCol()); + ExprNodeColumnDesc colExpr = new ExprNodeColumnDesc(colInfo); colList.add(colExpr); outputColNames.add(col); } Index: ql/src/java/org/apache/hadoop/hive/ql/optimizer/SetReducerParallelism.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/SetReducerParallelism.java (revision 1637277) +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/SetReducerParallelism.java (working copy) @@ -31,11 +31,13 @@ import org.apache.hadoop.hive.ql.lib.Node; import org.apache.hadoop.hive.ql.lib.NodeProcessor; import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx; +import org.apache.hadoop.hive.ql.optimizer.stats.annotation.StatsRulesProcFactory; import org.apache.hadoop.hive.ql.parse.OptimizeTezProcContext; import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.plan.ExprNodeDesc.ExprNodeDescEqualityWrapper; import org.apache.hadoop.hive.ql.plan.OperatorDesc; import org.apache.hadoop.hive.ql.plan.ReduceSinkDesc; +import org.apache.hadoop.hive.ql.stats.StatsUtils; import static org.apache.hadoop.hive.ql.plan.ReduceSinkDesc.ReducerTraits.AUTOPARALLEL; import static org.apache.hadoop.hive.ql.plan.ReduceSinkDesc.ReducerTraits.UNIFORM; @@ -82,7 +84,8 @@ for (Operator sibling: sink.getChildOperators().get(0).getParentOperators()) { if (sibling.getStatistics() != null) { - numberOfBytes += sibling.getStatistics().getDataSize(); + numberOfBytes = StatsUtils.safeAdd( + numberOfBytes, sibling.getStatistics().getDataSize()); } else { LOG.warn("No stats available from: "+sibling); } Index: ql/src/java/org/apache/hadoop/hive/ql/optimizer/SkewJoinOptimizer.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/SkewJoinOptimizer.java (revision 1637277) +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/SkewJoinOptimizer.java (working copy) @@ -49,6 +49,7 @@ import org.apache.hadoop.hive.ql.lib.RuleRegExp; import org.apache.hadoop.hive.ql.metadata.Table; import org.apache.hadoop.hive.ql.parse.ParseContext; +import org.apache.hadoop.hive.ql.parse.QBJoinTree; import org.apache.hadoop.hive.ql.parse.RowResolver; import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc; @@ -164,12 +165,23 @@ return null; } + // have to create a QBJoinTree for the cloned join operator + QBJoinTree originJoinTree = parseContext.getJoinContext().get(joinOp); + QBJoinTree newJoinTree; + try { + newJoinTree = originJoinTree.clone(); + } catch (CloneNotSupportedException e) { + LOG.debug("QBJoinTree could not be cloned: ", e); + return null; + } + JoinOperator joinOpClone; if (processSelect) { joinOpClone = (JoinOperator)(currOpClone.getParentOperators().get(0)); } else { joinOpClone = (JoinOperator)currOpClone; } + parseContext.getJoinContext().put(joinOpClone, newJoinTree); List tableScanCloneOpsForJoin = new ArrayList(); @@ -201,6 +213,7 @@ } parseContext.getTopOps().put(newAlias, tso); + setUpAlias(originJoinTree, newJoinTree, tabAlias, newAlias, tso); } // Now do a union of the select operators: selectOp and selectOpClone @@ -610,6 +623,48 @@ } } } + + /** + * Set alias in the cloned join tree + 
*/ + private static void setUpAlias(QBJoinTree origin, QBJoinTree cloned, String origAlias, + String newAlias, Operator topOp) { + cloned.getAliasToOpInfo().remove(origAlias); + cloned.getAliasToOpInfo().put(newAlias, topOp); + if (origin.getLeftAlias().equals(origAlias)) { + cloned.setLeftAlias(null); + cloned.setLeftAlias(newAlias); + } + replaceAlias(origin.getLeftAliases(), cloned.getLeftAliases(), origAlias, newAlias); + replaceAlias(origin.getRightAliases(), cloned.getRightAliases(), origAlias, newAlias); + replaceAlias(origin.getBaseSrc(), cloned.getBaseSrc(), origAlias, newAlias); + replaceAlias(origin.getMapAliases(), cloned.getMapAliases(), origAlias, newAlias); + replaceAlias(origin.getStreamAliases(), cloned.getStreamAliases(), origAlias, newAlias); + } + + private static void replaceAlias(String[] origin, String[] cloned, + String alias, String newAlias) { + if (origin == null || cloned == null || origin.length != cloned.length) { + return; + } + for (int i = 0; i < origin.length; i++) { + if (origin[i].equals(alias)) { + cloned[i] = newAlias; + } + } + } + + private static void replaceAlias(List origin, List cloned, + String alias, String newAlias) { + if (origin == null || cloned == null || origin.size() != cloned.size()) { + return; + } + for (int i = 0; i < origin.size(); i++) { + if (origin.get(i).equals(alias)) { + cloned.set(i, newAlias); + } + } + } } /* (non-Javadoc) Index: ql/src/java/org/apache/hadoop/hive/ql/optimizer/SortedDynPartitionOptimizer.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/SortedDynPartitionOptimizer.java (revision 1637277) +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/SortedDynPartitionOptimizer.java (working copy) @@ -18,14 +18,8 @@ package org.apache.hadoop.hive.ql.optimizer; -import java.io.Serializable; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.Stack; +import com.google.common.collect.Lists; +import com.google.common.collect.Maps; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -72,8 +66,14 @@ import org.apache.hadoop.hive.ql.plan.TableDesc; import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory; -import com.google.common.collect.Lists; -import com.google.common.collect.Maps; +import java.io.Serializable; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.Stack; /** * When dynamic partitioning (with or without bucketing and sorting) is enabled, this optimization @@ -157,7 +157,11 @@ // the reduce sink key. Since both key columns are not prefix subset // ReduceSinkDeDuplication will not merge them together resulting in 2 MR jobs. // To avoid that we will remove the RS (and EX) inserted by enforce bucketing/sorting. 
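
The change that follows turns a void helper into a boolean so the optimizer can bail out when constant folding has rewritten a dynamic partition column into a constant, which the column pruner then drops, leaving the ReduceSink parent and the Extract child with different schema widths. A minimal sketch of that guard (operator types simplified for illustration):

import org.apache.hadoop.hive.ql.exec.Operator;

final class SchemaGuardSketch {
  // True while the RS parent and the EX child still describe the same columns,
  // e.g. (key, value, ds) on both sides; false once constant folding plus
  // pruning has dropped a partition column from the child side.
  static boolean schemasStillAligned(Operator<?> rsParent, Operator<?> rsChild) {
    return rsParent.getSchema().getSignature().size()
        == rsChild.getSchema().getSignature().size();
  }
}
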
- removeRSInsertedByEnforceBucketing(fsOp); + if (!removeRSInsertedByEnforceBucketing(fsOp)) { + LOG.debug("Bailing out of sort dynamic partition optimization as some partition columns " + + "got constant folded."); + return null; + } // unlink connection between FS and its parent Operator fsParent = fsOp.getParentOperators().get(0); @@ -209,8 +213,7 @@ ArrayList newValueCols = Lists.newArrayList(); Map colExprMap = Maps.newHashMap(); for (ColumnInfo ci : valColInfo) { - newValueCols.add(new ExprNodeColumnDesc(ci.getType(), ci.getInternalName(), ci - .getTabAlias(), ci.isHiddenVirtualCol())); + newValueCols.add(new ExprNodeColumnDesc(ci)); colExprMap.put(ci.getInternalName(), newValueCols.get(newValueCols.size() - 1)); } ReduceSinkDesc rsConf = getReduceSinkDesc(partitionPositions, sortPositions, sortOrder, @@ -263,7 +266,7 @@ // Remove RS and EX introduced by enforce bucketing/sorting config // Convert PARENT -> RS -> EX -> FS to PARENT -> FS - private void removeRSInsertedByEnforceBucketing(FileSinkOperator fsOp) { + private boolean removeRSInsertedByEnforceBucketing(FileSinkOperator fsOp) { HiveConf hconf = parseCtx.getConf(); boolean enforceBucketing = HiveConf.getBoolVar(hconf, ConfVars.HIVEENFORCEBUCKETING); boolean enforceSorting = HiveConf.getBoolVar(hconf, ConfVars.HIVEENFORCESORTING); @@ -298,17 +301,27 @@ Operator rsGrandChild = rsChild.getChildOperators().get(0); if (rsChild instanceof ExtractOperator) { + // if schema size cannot be matched, then it could be because of constant folding + // converting partition column expression to constant expression. The constant + // expression will then get pruned by column pruner since it will not reference to + // any columns. + if (rsParent.getSchema().getSignature().size() != + rsChild.getSchema().getSignature().size()) { + return false; + } rsParent.getChildOperators().clear(); rsParent.getChildOperators().add(rsGrandChild); rsGrandChild.getParentOperators().clear(); rsGrandChild.getParentOperators().add(rsParent); parseCtx.removeOpParseCtx(rsToRemove); parseCtx.removeOpParseCtx(rsChild); - LOG.info("Removed " + rsParent.getOperatorId() + " and " + rsChild.getOperatorId() + LOG.info("Removed " + rsToRemove.getOperatorId() + " and " + rsChild.getOperatorId() + " as it was introduced by enforce bucketing/sorting."); } } } + + return true; } private List getPartitionPositions(DynamicPartitionCtx dpCtx, RowSchema schema) { @@ -476,8 +489,7 @@ for (Integer idx : pos) { ColumnInfo ci = colInfos.get(idx); - ExprNodeColumnDesc encd = new ExprNodeColumnDesc(ci.getType(), ci.getInternalName(), - ci.getTabAlias(), ci.isHiddenVirtualCol()); + ExprNodeColumnDesc encd = new ExprNodeColumnDesc(ci); cols.add(encd); } Index: ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/HiveOptiqUtil.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/HiveOptiqUtil.java (revision 1637277) +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/HiveOptiqUtil.java (working copy) @@ -75,7 +75,7 @@ return vCols; } - public static boolean validateASTForCBO(ASTNode ast) { + public static boolean validateASTForUnsupportedTokens(ASTNode ast) { String astTree = ast.toStringTree(); // if any of following tokens are present in AST, bail out String[] tokens = { "TOK_CHARSETLITERAL","TOK_TABLESPLITSAMPLE" }; Index: ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/rules/PartitionPruner.java =================================================================== --- 
ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/rules/PartitionPruner.java (revision 1637277) +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/rules/PartitionPruner.java (working copy) @@ -108,7 +108,7 @@ boolean argsPruned = false; GenericUDF hiveUDF = SqlFunctionConverter.getHiveUDF(call.getOperator(), - call.getType()); + call.getType(), call.operands.size()); if (hiveUDF != null && !FunctionRegistry.isDeterministic(hiveUDF)) { return null; Index: ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/ExprNodeConverter.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/ExprNodeConverter.java (revision 1637277) +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/ExprNodeConverter.java (working copy) @@ -89,17 +89,17 @@ ArrayList tmpExprArgs = new ArrayList(); tmpExprArgs.addAll(args.subList(0, 2)); gfDesc = new ExprNodeGenericFuncDesc(TypeConverter.convert(call.getType()), - SqlFunctionConverter.getHiveUDF(call.getOperator(), call.getType()), tmpExprArgs); + SqlFunctionConverter.getHiveUDF(call.getOperator(), call.getType(), 2), tmpExprArgs); for (int i = 2; i < call.operands.size(); i++) { tmpExprArgs = new ArrayList(); tmpExprArgs.add(gfDesc); tmpExprArgs.add(args.get(i)); gfDesc = new ExprNodeGenericFuncDesc(TypeConverter.convert(call.getType()), - SqlFunctionConverter.getHiveUDF(call.getOperator(), call.getType()), tmpExprArgs); + SqlFunctionConverter.getHiveUDF(call.getOperator(), call.getType(), 2), tmpExprArgs); } } else { gfDesc = new ExprNodeGenericFuncDesc(TypeConverter.convert(call.getType()), - SqlFunctionConverter.getHiveUDF(call.getOperator(), call.getType()), args); + SqlFunctionConverter.getHiveUDF(call.getOperator(), call.getType(), args.size()), args); } return gfDesc; Index: ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/JoinCondTypeCheckProcFactory.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/JoinCondTypeCheckProcFactory.java (revision 1637277) +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/JoinCondTypeCheckProcFactory.java (working copy) @@ -17,15 +17,6 @@ */ package org.apache.hadoop.hive.ql.optimizer.optiq.translator; -import java.util.ArrayList; -import java.util.Collection; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.Stack; - import org.apache.hadoop.hive.ql.ErrorMsg; import org.apache.hadoop.hive.ql.exec.ColumnInfo; import org.apache.hadoop.hive.ql.exec.FunctionInfo; @@ -47,6 +38,15 @@ import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPAnd; import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPOr; +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.Stack; + /** * JoinCondTypeCheckProcFactory is used by Optiq planner(CBO) to generate Join Conditions from Join Condition AST. * Reasons for sub class: @@ -99,8 +99,7 @@ if (!qualifiedAccess) { colInfo = getColInfo(ctx, null, tableOrCol, expr); // It's a column. 
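
The replacement just below is one of several call sites this patch collapses onto a ColumnInfo-based ExprNodeColumnDesc constructor (the same change appears in ColumnPrunerProcFactory, SortedDynPartitionOptimizer and GenMRSkewJoinProcessor). The overload is presumably a straight forwarding constructor along these lines:

// Assumed shape; the real constructor lives in org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc.
public ExprNodeColumnDesc(ColumnInfo colInfo) {
  this(colInfo.getType(), colInfo.getInternalName(),
      colInfo.getTabAlias(), colInfo.getIsVirtualCol());
}
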
- return new ExprNodeColumnDesc(colInfo.getType(), colInfo.getInternalName(), - colInfo.getTabAlias(), colInfo.getIsVirtualCol()); + return new ExprNodeColumnDesc(colInfo); } else if (hasTableAlias(ctx, tableOrCol, expr)) { return null; } else { Index: ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/SqlFunctionConverter.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/SqlFunctionConverter.java (revision 1637277) +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/SqlFunctionConverter.java (working copy) @@ -98,10 +98,19 @@ return getOptiqFn(name, optiqArgTypes, retType); } - public static GenericUDF getHiveUDF(SqlOperator op, RelDataType dt) { + public static GenericUDF getHiveUDF(SqlOperator op, RelDataType dt, int argsLength) { String name = reverseOperatorMap.get(op); - if (name == null) + if (name == null) { name = op.getName(); + } + // Make sure we handle unary + and - correctly. + if (argsLength == 1) { + if (name == "+") { + name = FunctionRegistry.UNARY_PLUS_FUNC_NAME; + } else if (name == "-") { + name = FunctionRegistry.UNARY_MINUS_FUNC_NAME; + } + } FunctionInfo hFn = name != null ? FunctionRegistry.getFunctionInfo(name) : null; if (hFn == null) hFn = handleExplicitCast(op, dt); Index: ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/TypeConverter.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/TypeConverter.java (revision 1637277) +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/TypeConverter.java (working copy) @@ -187,7 +187,7 @@ throw new RuntimeException("Unsupported Type : " + type.getTypeName()); } - return convertedType; + return dtFactory.createTypeWithNullability(convertedType, true); } public static RelDataType convert(ListTypeInfo lstType, Index: ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/GenMRSkewJoinProcessor.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/GenMRSkewJoinProcessor.java (revision 1637277) +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/GenMRSkewJoinProcessor.java (working copy) @@ -18,13 +18,6 @@ package org.apache.hadoop.hive.ql.optimizer.physical; -import java.io.Serializable; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map; - import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.ql.exec.ColumnInfo; @@ -60,6 +53,13 @@ import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory; +import java.io.Serializable; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; + /** * GenMRSkewJoinProcessor. * @@ -192,9 +192,7 @@ String newColName = i + "_VALUE_" + k; // any name, it does not matter. 
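
Returning to the SqlFunctionConverter change above: Calcite reports the unary and binary forms of "+" and "-" under the same operator name, so before the argsLength hint an expression like -x could resolve to the binary subtraction UDF. Roughly how the two cases now diverge (minusOp standing for Calcite's "-" operator and retType for the call's RelDataType; both illustrative):

GenericUDF negate   = SqlFunctionConverter.getHiveUDF(minusOp, retType, 1);
// resolved through FunctionRegistry.UNARY_MINUS_FUNC_NAME (unary negation)
GenericUDF subtract = SqlFunctionConverter.getHiveUDF(minusOp, retType, 2);
// resolved as the ordinary binary "-" function
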
ColumnInfo columnInfo = new ColumnInfo(newColName, type, alias.toString(), false); columnInfos.add(columnInfo); - newValueExpr.add(new ExprNodeColumnDesc( - columnInfo.getType(), columnInfo.getInternalName(), - columnInfo.getTabAlias(), false)); + newValueExpr.add(new ExprNodeColumnDesc(columnInfo)); if (!first) { colNames = colNames + ","; colTypes = colTypes + ","; @@ -216,9 +214,7 @@ ColumnInfo columnInfo = new ColumnInfo(joinKeys.get(k), TypeInfoFactory .getPrimitiveTypeInfo(joinKeyTypes.get(k)), alias.toString(), false); columnInfos.add(columnInfo); - newKeyExpr.add(new ExprNodeColumnDesc( - columnInfo.getType(), columnInfo.getInternalName(), - columnInfo.getTabAlias(), false)); + newKeyExpr.add(new ExprNodeColumnDesc(columnInfo)); } newJoinValues.put(alias, newValueExpr); Index: ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java (revision 1637277) +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java (working copy) @@ -18,13 +18,8 @@ package org.apache.hadoop.hive.ql.optimizer.stats.annotation; -import java.lang.reflect.Field; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.Stack; +import com.google.common.collect.Lists; +import com.google.common.collect.Maps; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -42,6 +37,7 @@ import org.apache.hadoop.hive.ql.exec.RowSchema; import org.apache.hadoop.hive.ql.exec.SelectOperator; import org.apache.hadoop.hive.ql.exec.TableScanOperator; +import org.apache.hadoop.hive.ql.exec.Utilities; import org.apache.hadoop.hive.ql.lib.Node; import org.apache.hadoop.hive.ql.lib.NodeProcessor; import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx; @@ -77,8 +73,13 @@ import org.apache.hadoop.hive.serde.serdeConstants; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils; -import com.google.common.collect.Lists; -import com.google.common.collect.Maps; +import java.lang.reflect.Field; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.Stack; public class StatsRulesProcFactory { @@ -170,7 +171,7 @@ // in case of select(*) the data size does not change if (!sop.getConf().isSelectStar() && !sop.getConf().isSelStarNoCompute()) { long dataSize = StatsUtils.getDataSizeFromColumnStats(stats.getNumRows(), colStats); - stats.setDataSize(setMaxIfInvalid(dataSize)); + stats.setDataSize(dataSize); } sop.setStatistics(stats); @@ -322,8 +323,8 @@ } else if (udf instanceof GenericUDFOPOr) { // for OR condition independently compute and update stats for (ExprNodeDesc child : genFunc.getChildren()) { - newNumRows += evaluateChildExpr(stats, child, aspCtx, neededCols, - fop); + newNumRows = StatsUtils.safeAdd( + evaluateChildExpr(stats, child, aspCtx, neededCols, fop), newNumRows); } } else if (udf instanceof GenericUDFOPNot) { newNumRows = evaluateNotExpr(stats, pred, aspCtx, neededCols, fop); @@ -677,9 +678,9 @@ if (cs != null) { long ndv = cs.getCountDistint(); if (cs.getNumNulls() > 0) { - ndv += 1; + ndv = StatsUtils.safeAdd(ndv, 1); } - ndvProduct *= ndv; + ndvProduct = StatsUtils.safeMult(ndvProduct, ndv); } else { if 
(parentStats.getColumnStatsState().equals(Statistics.State.COMPLETE)) { // the column must be an aggregate column inserted by GBY. We @@ -714,15 +715,16 @@ if (mapSideHashAgg) { if (containsGroupingSet) { // Case 4: column stats, hash aggregation, grouping sets - cardinality = Math.min((parentNumRows * sizeOfGroupingSet) / 2, - ndvProduct * parallelism * sizeOfGroupingSet); + cardinality = Math.min( + (StatsUtils.safeMult(parentNumRows, sizeOfGroupingSet)) / 2, + StatsUtils.safeMult(StatsUtils.safeMult(ndvProduct, parallelism), sizeOfGroupingSet)); if (isDebugEnabled) { LOG.debug("[Case 4] STATS-" + gop.toString() + ": cardinality: " + cardinality); } } else { // Case 3: column stats, hash aggregation, NO grouping sets - cardinality = Math.min(parentNumRows / 2, ndvProduct * parallelism); + cardinality = Math.min(parentNumRows / 2, StatsUtils.safeMult(ndvProduct, parallelism)); if (isDebugEnabled) { LOG.debug("[Case 3] STATS-" + gop.toString() + ": cardinality: " + cardinality); @@ -731,7 +733,7 @@ } else { if (containsGroupingSet) { // Case 6: column stats, NO hash aggregation, grouping sets - cardinality = parentNumRows * sizeOfGroupingSet; + cardinality = StatsUtils.safeMult(parentNumRows, sizeOfGroupingSet); if (isDebugEnabled) { LOG.debug("[Case 6] STATS-" + gop.toString() + ": cardinality: " + cardinality); @@ -758,7 +760,7 @@ if (containsGroupingSet) { // Case 8: column stats, grouping sets - cardinality = Math.min(parentNumRows, ndvProduct * sizeOfGroupingSet); + cardinality = Math.min(parentNumRows, StatsUtils.safeMult(ndvProduct, sizeOfGroupingSet)); if (isDebugEnabled) { LOG.debug("[Case 8] STATS-" + gop.toString() + ": cardinality: " + cardinality); @@ -789,7 +791,7 @@ if (containsGroupingSet) { // Case 2: NO column stats, NO hash aggregation, grouping sets - cardinality = parentNumRows * sizeOfGroupingSet; + cardinality = StatsUtils.safeMult(parentNumRows, sizeOfGroupingSet); if (isDebugEnabled) { LOG.debug("[Case 2] STATS-" + gop.toString() + ": cardinality: " + cardinality); @@ -828,7 +830,6 @@ // for those newly added columns if (!colExprMap.containsKey(ci.getInternalName())) { String colName = ci.getInternalName(); - colName = StatsUtils.stripPrefixFromColumnName(colName); String tabAlias = ci.getTabAlias(); String colType = ci.getTypeName(); ColStatistics cs = new ColStatistics(tabAlias, colName, colType); @@ -902,7 +903,7 @@ long avgKeySize = 0; for (ColStatistics cs : colStats) { if (cs != null) { - numEstimatedRows *= cs.getCountDistint(); + numEstimatedRows = StatsUtils.safeMult(numEstimatedRows, cs.getCountDistint()); avgKeySize += Math.ceil(cs.getAvgColLen()); } } @@ -956,7 +957,7 @@ long hashEntrySize = gop.javaHashEntryOverHead + avgKeySize + avgValSize; // estimated hash table size - long estHashTableSize = numEstimatedRows * hashEntrySize; + long estHashTableSize = StatsUtils.safeMult(numEstimatedRows, hashEntrySize); if (estHashTableSize < maxMemHashAgg) { return true; @@ -1065,7 +1066,7 @@ // detect if there are multiple attributes in join key ReduceSinkOperator rsOp = (ReduceSinkOperator) jop.getParentOperators().get(0); - List keyExprs = rsOp.getConf().getKeyCols(); + List keyExprs = rsOp.getConf().getOutputKeyColumnNames(); numAttr = keyExprs.size(); // infer PK-FK relationship in single attribute join case @@ -1077,7 +1078,7 @@ ReduceSinkOperator parent = (ReduceSinkOperator) jop.getParentOperators().get(pos); Statistics parentStats = parent.getStatistics(); - keyExprs = parent.getConf().getKeyCols(); + keyExprs = 
parent.getConf().getOutputKeyColumnNames(); // Parent RS may have column statistics from multiple parents. // Populate table alias to row count map, this will be used later to @@ -1096,8 +1097,8 @@ // used to quickly look-up for column statistics of join key. // TODO: expressions in join condition will be ignored. assign // internal name for expressions and estimate column statistics for expression. - List fqCols = - StatsUtils.getFullQualifedColNameFromExprs(keyExprs, parent.getColumnExprMap()); + List fqCols = StatsUtils.getFullyQualifedReducerKeyNames(keyExprs, + parent.getColumnExprMap()); joinKeys.put(pos, fqCols); // get column statistics for all output columns @@ -1119,7 +1120,6 @@ for (int idx = 0; idx < numAttr; idx++) { for (Integer i : joinKeys.keySet()) { String col = joinKeys.get(i).get(idx); - col = StatsUtils.stripPrefixFromColumnName(col); ColStatistics cs = joinedColStats.get(col); if (cs != null) { perAttrDVs.add(cs.getCountDistint()); @@ -1136,13 +1136,12 @@ denom = getEasedOutDenominator(distinctVals); } else { for (Long l : distinctVals) { - denom *= l; + denom = StatsUtils.safeMult(denom, l); } } } else { for (List jkeys : joinKeys.values()) { for (String jk : jkeys) { - jk = StatsUtils.stripPrefixFromColumnName(jk); ColStatistics cs = joinedColStats.get(jk); if (cs != null) { distinctVals.add(cs.getCountDistint()); @@ -1166,7 +1165,6 @@ ExprNodeDesc end = colExprMap.get(key); if (end instanceof ExprNodeColumnDesc) { String colName = ((ExprNodeColumnDesc) end).getColumn(); - colName = StatsUtils.stripPrefixFromColumnName(colName); String tabAlias = ((ExprNodeColumnDesc) end).getTabAlias(); String fqColName = StatsUtils.getFullyQualifiedColumnName(tabAlias, colName); ColStatistics cs = joinedColStats.get(fqColName); @@ -1214,13 +1212,13 @@ } long maxDataSize = parentSizes.get(maxRowIdx); - long newNumRows = (long) (joinFactor * maxRowCount * (numParents - 1)); - long newDataSize = (long) (joinFactor * maxDataSize * (numParents - 1)); + long newNumRows = StatsUtils.safeMult(StatsUtils.safeMult(maxRowCount, (numParents - 1)), joinFactor); + long newDataSize = StatsUtils.safeMult(StatsUtils.safeMult(maxDataSize, (numParents - 1)), joinFactor); Statistics wcStats = new Statistics(); - wcStats.setNumRows(setMaxIfInvalid(newNumRows)); - wcStats.setDataSize(setMaxIfInvalid(newDataSize)); + wcStats.setNumRows(newNumRows); + wcStats.setDataSize(newDataSize); jop.setStatistics(wcStats); - + if (isDebugEnabled) { LOG.debug("[1] STATS-" + jop.toString() + ": " + wcStats.extendedToString()); } @@ -1339,6 +1337,7 @@ } } + // No need for overflow checks, assume selectivity is always <= 1.0 float selMultiParent = 1.0f; for(Operator parent : multiParentOp.getParentOperators()) { // In the above example, TS-1 -> RS-1 and TS-2 -> RS-2 are simple trees @@ -1369,8 +1368,8 @@ Operator op = ops.get(i); if (op != null && op instanceof ReduceSinkOperator) { ReduceSinkOperator rsOp = (ReduceSinkOperator) op; - List keys = rsOp.getConf().getKeyCols(); - List fqCols = StatsUtils.getFullQualifedColNameFromExprs(keys, + List keys = rsOp.getConf().getOutputKeyColumnNames(); + List fqCols = StatsUtils.getFullyQualifedReducerKeyNames(keys, rsOp.getColumnExprMap()); if (fqCols.size() == 1) { String joinCol = fqCols.get(0); @@ -1400,8 +1399,8 @@ Operator op = ops.get(i); if (op instanceof ReduceSinkOperator) { ReduceSinkOperator rsOp = (ReduceSinkOperator) op; - List keys = rsOp.getConf().getKeyCols(); - List fqCols = StatsUtils.getFullQualifedColNameFromExprs(keys, + List keys = 
rsOp.getConf().getOutputKeyColumnNames(); + List fqCols = StatsUtils.getFullyQualifedReducerKeyNames(keys, rsOp.getColumnExprMap()); if (fqCols.size() == 1) { String joinCol = fqCols.get(0); @@ -1441,7 +1440,7 @@ LOG.info("STATS-" + jop.toString() + ": Overflow in number of rows." + newNumRows + " rows will be set to Long.MAX_VALUE"); } - newNumRows = setMaxIfInvalid(newNumRows); + newNumRows = StatsUtils.getMaxIfOverflow(newNumRows); stats.setNumRows(newNumRows); // scale down/up the column statistics based on the changes in number of @@ -1472,7 +1471,7 @@ stats.setColumnStats(colStats); long newDataSize = StatsUtils .getDataSizeFromColumnStats(newNumRows, colStats); - stats.setDataSize(setMaxIfInvalid(newDataSize)); + stats.setDataSize(StatsUtils.getMaxIfOverflow(newDataSize)); } private long computeNewRowCount(List rowCountParents, long denom) { @@ -1494,7 +1493,7 @@ for (int i = 0; i < rowCountParents.size(); i++) { if (i != maxIdx) { - result *= rowCountParents.get(i); + result = StatsUtils.safeMult(result, rowCountParents.get(i)); } } @@ -1512,7 +1511,6 @@ // find min NDV for joining columns for (Map.Entry> entry : joinKeys.entrySet()) { String key = entry.getValue().get(joinColIdx); - key = StatsUtils.stripPrefixFromColumnName(key); ColStatistics cs = joinedColStats.get(key); if (cs != null && cs.getCountDistint() < minNDV) { minNDV = cs.getCountDistint(); @@ -1523,7 +1521,6 @@ if (minNDV != Long.MAX_VALUE) { for (Map.Entry> entry : joinKeys.entrySet()) { String key = entry.getValue().get(joinColIdx); - key = StatsUtils.stripPrefixFromColumnName(key); ColStatistics cs = joinedColStats.get(key); if (cs != null) { cs.setCountDistint(minNDV); @@ -1569,7 +1566,7 @@ long denom = 1; for (int i = 0; i < distinctVals.size(); i++) { if (i != minIdx) { - denom *= distinctVals.get(i); + denom = StatsUtils.safeMult(denom, distinctVals.get(i)); } } return denom; @@ -1613,12 +1610,13 @@ // in the absence of column statistics, compute data size based on // based on average row size Statistics wcStats = parentStats.clone(); + limit = StatsUtils.getMaxIfOverflow(limit); if (limit <= parentStats.getNumRows()) { long numRows = limit; long avgRowSize = parentStats.getAvgRowSize(); - long dataSize = avgRowSize * limit; - wcStats.setNumRows(setMaxIfInvalid(numRows)); - wcStats.setDataSize(setMaxIfInvalid(dataSize)); + long dataSize = StatsUtils.safeMult(avgRowSize, limit); + wcStats.setNumRows(numRows); + wcStats.setDataSize(dataSize); } lop.setStatistics(wcStats); @@ -1662,26 +1660,26 @@ if (satisfyPrecondition(parentStats)) { List colStats = Lists.newArrayList(); for (String key : outKeyColNames) { - String prefixedKey = "KEY." + key; + String prefixedKey = Utilities.ReduceField.KEY.toString() + "." + key; ExprNodeDesc end = colExprMap.get(prefixedKey); if (end != null) { ColStatistics cs = StatsUtils .getColStatisticsFromExpression(conf, parentStats, end); if (cs != null) { - cs.setColumnName(key); + cs.setColumnName(prefixedKey); colStats.add(cs); } } } for (String val : outValueColNames) { - String prefixedVal = "VALUE." + val; + String prefixedVal = Utilities.ReduceField.VALUE.toString() + "." 
+ val; ExprNodeDesc end = colExprMap.get(prefixedVal); if (end != null) { ColStatistics cs = StatsUtils .getColStatisticsFromExpression(conf, parentStats, end); if (cs != null) { - cs.setColumnName(val); + cs.setColumnName(prefixedVal); colStats.add(cs); } } @@ -1815,7 +1813,7 @@ + newNumRows + " rows will be set to Long.MAX_VALUE"); } - newNumRows = setMaxIfInvalid(newNumRows); + newNumRows = StatsUtils.getMaxIfOverflow(newNumRows); long oldRowCount = stats.getNumRows(); double ratio = (double) newNumRows / (double) oldRowCount; stats.setNumRows(newNumRows); @@ -1842,10 +1840,10 @@ } stats.setColumnStats(colStats); long newDataSize = StatsUtils.getDataSizeFromColumnStats(newNumRows, colStats); - stats.setDataSize(setMaxIfInvalid(newDataSize)); + stats.setDataSize(StatsUtils.getMaxIfOverflow(newDataSize)); } else { long newDataSize = (long) (ratio * stats.getDataSize()); - stats.setDataSize(setMaxIfInvalid(newDataSize)); + stats.setDataSize(StatsUtils.getMaxIfOverflow(newDataSize)); } } @@ -1853,14 +1851,4 @@ return stats != null && stats.getBasicStatsState().equals(Statistics.State.COMPLETE) && !stats.getColumnStatsState().equals(Statistics.State.NONE); } - - /** - * negative number of rows or data sizes are invalid. It could be because of - * long overflow in which case return Long.MAX_VALUE - * @param val - input value - * @return Long.MAX_VALUE if val is negative else val - */ - static long setMaxIfInvalid(long val) { - return val < 0 ? Long.MAX_VALUE : val; - } } Index: ql/src/java/org/apache/hadoop/hive/ql/parse/ExplainSQRewriteSemanticAnalyzer.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/parse/ExplainSQRewriteSemanticAnalyzer.java (revision 1637277) +++ ql/src/java/org/apache/hadoop/hive/ql/parse/ExplainSQRewriteSemanticAnalyzer.java (working copy) @@ -22,6 +22,7 @@ import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.api.FieldSchema; +import org.apache.hadoop.hive.ql.exec.ExplainSQRewriteTask; import org.apache.hadoop.hive.ql.exec.Task; import org.apache.hadoop.hive.ql.exec.TaskFactory; import org.apache.hadoop.hive.ql.plan.ExplainSQRewriteWork; @@ -55,7 +56,7 @@ ctx ); - Task explTask = TaskFactory.get(work, conf); + ExplainSQRewriteTask explTask = (ExplainSQRewriteTask) TaskFactory.get(work, conf); fieldList = explTask.getResultSchema(); rootTasks.add(explTask); Index: ql/src/java/org/apache/hadoop/hive/ql/parse/ExplainSemanticAnalyzer.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/parse/ExplainSemanticAnalyzer.java (revision 1637277) +++ ql/src/java/org/apache/hadoop/hive/ql/parse/ExplainSemanticAnalyzer.java (working copy) @@ -106,7 +106,7 @@ work.setAppendTaskType( HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVEEXPLAINDEPENDENCYAPPENDTASKTYPES)); - Task explTask = TaskFactory.get(work, conf); + ExplainTask explTask = (ExplainTask) TaskFactory.get(work, conf); fieldList = explTask.getResultSchema(); rootTasks.add(explTask); Index: ql/src/java/org/apache/hadoop/hive/ql/parse/JoinCond.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/parse/JoinCond.java (revision 1637277) +++ ql/src/java/org/apache/hadoop/hive/ql/parse/JoinCond.java (working copy) @@ -79,4 +79,7 @@ this.joinType = joinType; } + public void setPreserved(boolean preserved) { + this.preserved = preserved; + } } Index: 
ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java (revision 1637277) +++ ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java (working copy) @@ -238,6 +238,8 @@ // create final load/move work + boolean preservePartitionSpecs = false; + Map partSpec = ts.getPartSpec(); if (partSpec == null) { partSpec = new LinkedHashMap(); @@ -252,9 +254,14 @@ throw new SemanticException(ErrorMsg.OFFLINE_TABLE_OR_PARTITION. getMsg(ts.tableName + ":" + part.getName())); } - outputs.add(new WriteEntity(part, - (isOverWrite ? WriteEntity.WriteType.INSERT_OVERWRITE : - WriteEntity.WriteType.INSERT))); + if (isOverWrite){ + outputs.add(new WriteEntity(part, WriteEntity.WriteType.INSERT_OVERWRITE)); + } else { + outputs.add(new WriteEntity(part, WriteEntity.WriteType.INSERT)); + // If partition already exists and we aren't overwriting it, then respect + // its current location info rather than picking it from the parent TableDesc + preservePartitionSpecs = true; + } } else { outputs.add(new WriteEntity(ts.tableHandle, (isOverWrite ? WriteEntity.WriteType.INSERT_OVERWRITE : @@ -269,6 +276,12 @@ LoadTableDesc loadTableWork; loadTableWork = new LoadTableDesc(new Path(fromURI), Utilities.getTableDesc(ts.tableHandle), partSpec, isOverWrite); + if (preservePartitionSpecs){ + // Note : preservePartitionSpecs=true implies inheritTableSpecs=false but + // but preservePartitionSpecs=false(default) here is not sufficient enough + // info to set inheritTableSpecs=true + loadTableWork.setInheritTableSpecs(false); + } Task childTask = TaskFactory.get(new MoveWork(getInputs(), getOutputs(), loadTableWork, null, true, isLocal), conf); Index: ql/src/java/org/apache/hadoop/hive/ql/parse/QBJoinTree.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/parse/QBJoinTree.java (revision 1637277) +++ ql/src/java/org/apache/hadoop/hive/ql/parse/QBJoinTree.java (working copy) @@ -32,7 +32,7 @@ * Internal representation of the join tree. * */ -public class QBJoinTree implements Serializable{ +public class QBJoinTree implements Serializable, Cloneable { private static final long serialVersionUID = 1L; private String leftAlias; private String[] rightAliases; @@ -363,4 +363,70 @@ public List getPostJoinFilters() { return postJoinFilters; } + + @Override + public QBJoinTree clone() throws CloneNotSupportedException { + QBJoinTree cloned = new QBJoinTree(); + + // shallow copy aliasToOpInfo, we won't want to clone the operator tree here + cloned.setAliasToOpInfo(aliasToOpInfo == null ? null : + new HashMap>(aliasToOpInfo)); + + cloned.setBaseSrc(baseSrc == null ? null : baseSrc.clone()); + + // shallow copy ASTNode + cloned.setExpressions(expressions); + cloned.setFilters(filters); + cloned.setFiltersForPushing(filtersForPushing); + + // clone filterMap + int[][] clonedFilterMap = filterMap == null ? null : new int[filterMap.length][]; + if (filterMap != null) { + for (int i = 0; i < filterMap.length; i++) { + clonedFilterMap[i] = filterMap[i] == null ? null : filterMap[i].clone(); + } + } + cloned.setFilterMap(clonedFilterMap); + + cloned.setId(id); + + // clone joinCond + JoinCond[] clonedJoinCond = joinCond == null ? 
null : new JoinCond[joinCond.length]; + if (joinCond != null) { + for (int i = 0; i < joinCond.length; i++) { + if(joinCond[i] == null) { + continue; + } + JoinCond clonedCond = new JoinCond(); + clonedCond.setJoinType(joinCond[i].getJoinType()); + clonedCond.setLeft(joinCond[i].getLeft()); + clonedCond.setPreserved(joinCond[i].getPreserved()); + clonedCond.setRight(joinCond[i].getRight()); + clonedJoinCond[i] = clonedCond; + } + } + cloned.setJoinCond(clonedJoinCond); + + cloned.setJoinSrc(joinSrc == null ? null : joinSrc.clone()); + cloned.setLeftAlias(leftAlias); + cloned.setLeftAliases(leftAliases == null ? null : leftAliases.clone()); + cloned.setMapAliases(mapAliases == null ? null : new ArrayList(mapAliases)); + cloned.setMapSideJoin(mapSideJoin); + cloned.setNoOuterJoin(noOuterJoin); + cloned.setNoSemiJoin(noSemiJoin); + cloned.setNullSafes(nullsafes == null ? null : new ArrayList(nullsafes)); + cloned.setRightAliases(rightAliases == null ? null : rightAliases.clone()); + cloned.setStreamAliases(streamAliases == null ? null : new ArrayList(streamAliases)); + + // clone postJoinFilters + for (ASTNode filter : postJoinFilters) { + cloned.getPostJoinFilters().add(filter); + } + // clone rhsSemijoin + for (Entry> entry : rhsSemijoin.entrySet()) { + cloned.addRHSSemijoinColumns(entry.getKey(), entry.getValue()); + } + + return cloned; + } } Index: ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java (revision 1637277) +++ ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java (working copy) @@ -20,35 +20,13 @@ import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.HIVESTATSDBCLASS; -import java.io.IOException; -import java.io.Serializable; -import java.lang.reflect.Field; -import java.lang.reflect.InvocationTargetException; -import java.lang.reflect.UndeclaredThrowableException; -import java.math.BigDecimal; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.BitSet; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Iterator; -import java.util.LinkedHashMap; -import java.util.LinkedList; -import java.util.List; -import java.util.Map; -import java.util.Map.Entry; -import java.util.Set; -import java.util.TreeSet; -import java.util.UUID; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.regex.Pattern; -import java.util.regex.PatternSyntaxException; - import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Function; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableList.Builder; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.Lists; -import net.hydromatic.optiq.SchemaPlus; -import net.hydromatic.optiq.tools.Frameworks; - import org.antlr.runtime.ClassicToken; import org.antlr.runtime.Token; import org.antlr.runtime.tree.Tree; @@ -122,7 +100,6 @@ import org.apache.hadoop.hive.ql.metadata.Table; import org.apache.hadoop.hive.ql.metadata.VirtualColumn; import org.apache.hadoop.hive.ql.optimizer.Optimizer; -import org.apache.hadoop.hive.ql.optimizer.unionproc.UnionProcContext; import org.apache.hadoop.hive.ql.optimizer.optiq.HiveDefaultRelMetadataProvider; import org.apache.hadoop.hive.ql.optimizer.optiq.HiveOptiqUtil; import org.apache.hadoop.hive.ql.optimizer.optiq.HiveTypeSystemImpl; @@ -146,6 +123,7 @@ import 
org.apache.hadoop.hive.ql.optimizer.optiq.translator.RexNodeConverter; import org.apache.hadoop.hive.ql.optimizer.optiq.translator.SqlFunctionConverter; import org.apache.hadoop.hive.ql.optimizer.optiq.translator.TypeConverter; +import org.apache.hadoop.hive.ql.optimizer.unionproc.UnionProcContext; import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer.tableSpec.SpecType; import org.apache.hadoop.hive.ql.parse.PTFInvocationSpec.OrderExpression; import org.apache.hadoop.hive.ql.parse.PTFInvocationSpec.OrderSpec; @@ -259,6 +237,9 @@ import org.eigenbase.rel.rules.MergeFilterRule; import org.eigenbase.rel.rules.PushFilterPastProjectRule; import org.eigenbase.rel.rules.PushFilterPastSetOpRule; +import org.eigenbase.rel.rules.PushSemiJoinPastFilterRule; +import org.eigenbase.rel.rules.PushSemiJoinPastJoinRule; +import org.eigenbase.rel.rules.PushSemiJoinPastProjectRule; import org.eigenbase.rel.rules.SemiJoinRel; import org.eigenbase.rel.rules.TransitivePredicatesOnJoinRule; import org.eigenbase.relopt.RelOptCluster; @@ -276,31 +257,52 @@ import org.eigenbase.reltype.RelDataTypeFactory; import org.eigenbase.reltype.RelDataTypeField; import org.eigenbase.rex.RexBuilder; +import org.eigenbase.rex.RexFieldCollation; import org.eigenbase.rex.RexInputRef; import org.eigenbase.rex.RexNode; import org.eigenbase.rex.RexUtil; import org.eigenbase.rex.RexWindowBound; -import org.eigenbase.rex.RexFieldCollation; import org.eigenbase.sql.SqlAggFunction; +import org.eigenbase.sql.SqlCall; +import org.eigenbase.sql.SqlExplainLevel; +import org.eigenbase.sql.SqlKind; +import org.eigenbase.sql.SqlLiteral; +import org.eigenbase.sql.SqlNode; import org.eigenbase.sql.SqlWindow; import org.eigenbase.sql.parser.SqlParserPos; import org.eigenbase.sql.type.SqlTypeName; import org.eigenbase.sql2rel.RelFieldTrimmer; -import org.eigenbase.sql.SqlCall; -import org.eigenbase.sql.SqlExplainLevel; -import org.eigenbase.sql.SqlKind; -import org.eigenbase.sql.SqlNode; -import org.eigenbase.sql.SqlLiteral; import org.eigenbase.util.CompositeList; import org.eigenbase.util.ImmutableIntList; import org.eigenbase.util.Pair; -import com.google.common.base.Function; -import com.google.common.collect.ImmutableList; -import com.google.common.collect.ImmutableList.Builder; -import com.google.common.collect.ImmutableMap; -import com.google.common.collect.Lists; +import java.io.IOException; +import java.io.Serializable; +import java.lang.reflect.Field; +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.UndeclaredThrowableException; +import java.math.BigDecimal; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.BitSet; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; +import java.util.LinkedHashMap; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Set; +import java.util.TreeSet; +import java.util.UUID; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.regex.Pattern; +import java.util.regex.PatternSyntaxException; +import net.hydromatic.optiq.SchemaPlus; +import net.hydromatic.optiq.tools.Frameworks; + /** * Implementation of the semantic analyzer. It generates the query plan. 
* There are other specific semantic analyzers for some hive operations such as @@ -4130,9 +4132,7 @@ throw new SemanticException(ErrorMsg.INVALID_COLUMN.getMsg(grpbyExpr)); } - groupByKeys.add(new ExprNodeColumnDesc(exprInfo.getType(), exprInfo - .getInternalName(), exprInfo.getTabAlias(), exprInfo - .getIsVirtualCol())); + groupByKeys.add(new ExprNodeColumnDesc(exprInfo)); String field = getColumnInternalName(i); outputColumnNames.add(field); ColumnInfo oColInfo = new ColumnInfo(field, exprInfo.getType(), "", false); @@ -6260,8 +6260,10 @@ if (!("".equals(nm[0])) && nm[1] != null) { colName = unescapeIdentifier(colInfo.getAlias()).toLowerCase(); // remove `` } - String ctasColName = fixCtasColumnName(colName, colInfo, inputRR); - col.setName(ctasColName); + if (runCBO) { + colName = fixCtasColumnName(colName); + } + col.setName(colName); col.setType(colInfo.getType().getTypeName()); field_schemas.add(col); } @@ -6439,7 +6441,7 @@ return output; } - private static String fixCtasColumnName(String colName, ColumnInfo colInfo, RowResolver rr) { + private static String fixCtasColumnName(String colName) { int lastDot = colName.lastIndexOf('.'); if (lastDot < 0) return colName; // alias is not fully qualified String nqColumnName = colName.substring(lastDot + 1); @@ -6926,9 +6928,7 @@ for (ColumnInfo colInfo : inputRR.getColumnInfos()) { String internalName = getColumnInternalName(i++); outputColumns.add(internalName); - valueCols.add(new ExprNodeColumnDesc(colInfo.getType(), colInfo - .getInternalName(), colInfo.getTabAlias(), colInfo - .getIsVirtualCol())); + valueCols.add(new ExprNodeColumnDesc(colInfo)); colExprMap.put(internalName, valueCols .get(valueCols.size() - 1)); } @@ -7057,8 +7057,7 @@ ColumnInfo colInfo = columnInfos.get(i); String[] nm = inputRR.reverseLookup(colInfo.getInternalName()); String[] nm2 = inputRR.getAlternateMappings(colInfo.getInternalName()); - ExprNodeColumnDesc value = new ExprNodeColumnDesc(colInfo.getType(), - colInfo.getInternalName(), colInfo.getTabAlias(), colInfo.getIsVirtualCol()); + ExprNodeColumnDesc value = new ExprNodeColumnDesc(colInfo); // backtrack can be null when input is script operator ExprNodeDesc valueBack = ExprNodeDescUtils.backtrack(value, dummy, input); @@ -7310,8 +7309,7 @@ ColumnInfo colInfo = columns.get(i); String[] nm = inputRR.reverseLookup(colInfo.getInternalName()); String[] nm2 = inputRR.getAlternateMappings(colInfo.getInternalName()); - ExprNodeDesc expr = new ExprNodeColumnDesc(colInfo.getType(), - colInfo.getInternalName(), colInfo.getTabAlias(), colInfo.getIsVirtualCol()); + ExprNodeDesc expr = new ExprNodeColumnDesc(colInfo); // backtrack can be null when input is script operator ExprNodeDesc exprBack = ExprNodeDescUtils.backtrack(expr, dummy, child); @@ -8399,12 +8397,9 @@ new HashMap(); for (int i = 0; i < columns.size(); i++) { ColumnInfo col = columns.get(i); - colList.add(new ExprNodeColumnDesc(col.getType(), col.getInternalName(), - col.getTabAlias(), col.getIsVirtualCol())); + colList.add(new ExprNodeColumnDesc(col)); columnNames.add(col.getInternalName()); - columnExprMap.put(col.getInternalName(), - new ExprNodeColumnDesc(col.getType(), col.getInternalName(), - col.getTabAlias(), col.getIsVirtualCol())); + columnExprMap.put(col.getInternalName(), new ExprNodeColumnDesc(col)); } Operator output = putOpInsertMap(OperatorFactory.getAndMakeChild( new SelectDesc(colList, columnNames, true), new RowSchema(inputRR @@ -9261,8 +9256,7 @@ for (String col : bucketCols) { ColumnInfo ci = rwsch.get(alias, col); // TODO: change type 
to the one in the table schema
- args.add(new ExprNodeColumnDesc(ci.getType(), ci.getInternalName(), ci
- .getTabAlias(), ci.getIsVirtualCol()));
+ args.add(new ExprNodeColumnDesc(ci));
 } } else { for (ASTNode expr : ts.getExprs()) {
@@ -9843,8 +9837,7 @@
 for (ColumnInfo col : source.getColumnInfos()) { String[] tabCol = source.reverseLookup(col.getInternalName()); lvForwardRR.put(tabCol[0], tabCol[1], col);
- ExprNodeDesc colExpr = new ExprNodeColumnDesc(col.getType(), col.getInternalName(),
- col.getTabAlias(), false);
+ ExprNodeDesc colExpr = new ExprNodeColumnDesc(col);
 colList.add(colExpr); colNames.add(colExpr.getName()); lvfColExprMap.put(col.getInternalName(), colExpr);
@@ -9933,8 +9926,7 @@
 String tableAlias = tableCol[0]; String colAlias = tableCol[1]; dest.put(tableAlias, colAlias, newCol);
- colExprMap.put(internalName, new ExprNodeColumnDesc(c.getType(), c.getInternalName(),
- c.getTabAlias(), c.getIsVirtualCol()));
+ colExprMap.put(internalName, new ExprNodeColumnDesc(c));
 } }
@@ -10241,13 +10233,15 @@
 // be supported and would require additional checks similar to IsQuery? boolean isSupportedType = qb.getIsQuery() || qb.isCTAS() || cboCtx.type == PreCboCtx.Type.INSERT;
- boolean result = isSupportedRoot && isSupportedType && createVwDesc == null;
+ boolean noBadTokens = HiveOptiqUtil.validateASTForUnsupportedTokens(ast);
+ boolean result = isSupportedRoot && isSupportedType && createVwDesc == null && noBadTokens;
 if (!result) { if (needToLogMessage) { String msg = ""; if (!isSupportedRoot) msg += "doesn't have QUERY or EXPLAIN as root and not a CTAS; "; if (!isSupportedType) msg += "is not a query, CTAS, or insert; "; if (createVwDesc != null) msg += "has create view; ";
+ if (!noBadTokens) msg += "has unsupported tokens; ";
 if (msg.isEmpty()) msg += "has some unspecified limitations; "; LOG.info("Not invoking CBO because the statement " + msg.substring(0, msg.length() - 2));
@@ -11993,9 +11987,7 @@
 */ int pos = 0; for (ColumnInfo colInfo : colInfoList) {
- ExprNodeDesc valueColExpr = new ExprNodeColumnDesc(colInfo.getType(), colInfo
- .getInternalName(), colInfo.getTabAlias(), colInfo
- .getIsVirtualCol());
+ ExprNodeDesc valueColExpr = new ExprNodeColumnDesc(colInfo);
 valueCols.add(valueColExpr); String internalName = SemanticAnalyzer.getColumnInternalName(pos++); outputColumnNames.add(internalName);
@@ -12240,9 +12232,7 @@
 RowResolver rsNewRR = new RowResolver(); int pos = 0; for (ColumnInfo colInfo : colInfoList) {
- ExprNodeDesc valueColExpr = new ExprNodeColumnDesc(colInfo.getType(), colInfo
- .getInternalName(), colInfo.getTabAlias(), colInfo
- .getIsVirtualCol());
+ ExprNodeDesc valueColExpr = new ExprNodeColumnDesc(colInfo);
 valueCols.add(valueColExpr); String internalName = SemanticAnalyzer.getColumnInternalName(pos++); outputColumnNames.add(internalName);
@@ -12642,7 +12632,15 @@
 // TODO: Decorrelation of subquery should be done before attempting
 // Partition Pruning; otherwise Expression evaluation may try to execute
 // correlated sub query.
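Most of the SemanticAnalyzer churn in the hunks above is mechanical: call sites that spelled out four ColumnInfo getters now go through the ExprNodeColumnDesc(ColumnInfo) convenience constructor added in the ExprNodeColumnDesc hunk below. A minimal sketch of the simplification, assuming Hive's ql and serde2 modules on the classpath; the class name, column name, and alias here are illustrative:

import org.apache.hadoop.hive.ql.exec.ColumnInfo;
import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;

public class ExprNodeColumnDescSketch {
  public static void main(String[] args) {
    // an ordinary string column "_col0" from table alias "t" (illustrative values)
    ColumnInfo ci = new ColumnInfo("_col0", TypeInfoFactory.stringTypeInfo, "t", false);
    // before this patch: every call site repeated the four getters
    ExprNodeColumnDesc verbose = new ExprNodeColumnDesc(ci.getType(),
        ci.getInternalName(), ci.getTabAlias(), ci.getIsVirtualCol());
    // after this patch: the ColumnInfo-based convenience constructor
    ExprNodeColumnDesc concise = new ExprNodeColumnDesc(ci);
    System.out.println(verbose.isSame(concise)); // true: both describe the same column reference
  }
}

The hunk that follows then reorders the HEP planner passes so the three semi-join pushdown rules run in their own pass before the filter pushdown rules.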
- basePlan = hepPlan(basePlan, true, mdProvider, new PushFilterPastProjectRule( + + // Push Down Semi Joins + basePlan = hepPlan(basePlan, true, mdProvider, + PushSemiJoinPastJoinRule.INSTANCE, + new PushSemiJoinPastFilterRule(HiveFilterRel.DEFAULT_FILTER_FACTORY), + new PushSemiJoinPastProjectRule(HiveProjectRel.DEFAULT_PROJECT_FACTORY)); + + basePlan = hepPlan(basePlan, true, mdProvider, + new PushFilterPastProjectRule( FilterRelBase.class, HiveFilterRel.DEFAULT_FILTER_FACTORY, HiveProjectRel.class, HiveProjectRel.DEFAULT_PROJECT_FACTORY), new PushFilterPastSetOpRule( HiveFilterRel.DEFAULT_FILTER_FACTORY), new MergeFilterRule( Index: ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeColumnDesc.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeColumnDesc.java (revision 1637277) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeColumnDesc.java (working copy) @@ -18,14 +18,15 @@ package org.apache.hadoop.hive.ql.plan; +import org.apache.commons.lang.builder.HashCodeBuilder; +import org.apache.hadoop.hive.ql.exec.ColumnInfo; +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory; + import java.io.Serializable; import java.util.ArrayList; import java.util.List; -import org.apache.commons.lang.builder.HashCodeBuilder; -import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; -import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory; - /** * ExprNodeColumnDesc. * @@ -56,6 +57,10 @@ public ExprNodeColumnDesc() { } + public ExprNodeColumnDesc(ColumnInfo ci) { + this(ci.getType(), ci.getInternalName(), ci.getTabAlias(), ci.getIsVirtualCol()); + } + public ExprNodeColumnDesc(TypeInfo typeInfo, String column, String tabAlias, boolean isPartitionColOrVirtualCol) { super(typeInfo); Index: ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeDescUtils.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeDescUtils.java (revision 1637277) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeDescUtils.java (working copy) @@ -193,6 +193,7 @@ /** * Convert expressions in current operator to those in terminal operator, which * is an ancestor of current or null (back to top operator). + * Possibly contain null values for non-traceable exprs */ public static ArrayList backtrack(List sources, Operator current, Operator terminal) throws SemanticException { @@ -396,29 +397,34 @@ * Get Map of ExprNodeColumnDesc HashCode to ExprNodeColumnDesc. 
* * @param exprDesc - * @param hashCodeTocolumnDescMap + * @param hashCodeToColumnDescMap * Assumption: If two ExprNodeColumnDesc have same hash code then * they are logically referring to same projection */ public static void getExprNodeColumnDesc(ExprNodeDesc exprDesc, - Map hashCodeTocolumnDescMap) { + Map hashCodeToColumnDescMap) { if (exprDesc instanceof ExprNodeColumnDesc) { - hashCodeTocolumnDescMap.put( - ((ExprNodeColumnDesc) exprDesc).hashCode(), - ((ExprNodeColumnDesc) exprDesc)); + hashCodeToColumnDescMap.put(exprDesc.hashCode(), exprDesc); } else if (exprDesc instanceof ExprNodeColumnListDesc) { - for (ExprNodeDesc child : ((ExprNodeColumnListDesc) exprDesc) - .getChildren()) { - getExprNodeColumnDesc(child, hashCodeTocolumnDescMap); + for (ExprNodeDesc child : exprDesc.getChildren()) { + getExprNodeColumnDesc(child, hashCodeToColumnDescMap); } } else if (exprDesc instanceof ExprNodeGenericFuncDesc) { - for (ExprNodeDesc child : ((ExprNodeGenericFuncDesc) exprDesc) - .getChildren()) { - getExprNodeColumnDesc(child, hashCodeTocolumnDescMap); + for (ExprNodeDesc child : exprDesc.getChildren()) { + getExprNodeColumnDesc(child, hashCodeToColumnDescMap); } } else if (exprDesc instanceof ExprNodeFieldDesc) { getExprNodeColumnDesc(((ExprNodeFieldDesc) exprDesc).getDesc(), - hashCodeTocolumnDescMap); + hashCodeToColumnDescMap); } } + + public static boolean isAllConstants(List value) { + for (ExprNodeDesc expr : value) { + if (!(expr instanceof ExprNodeConstantDesc)) { + return false; + } + } + return true; + } } Index: ql/src/java/org/apache/hadoop/hive/ql/ppd/ExprWalkerInfo.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/ppd/ExprWalkerInfo.java (revision 1637277) +++ ql/src/java/org/apache/hadoop/hive/ql/ppd/ExprWalkerInfo.java (working copy) @@ -19,16 +19,13 @@ import java.util.ArrayList; import java.util.HashMap; -import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Map.Entry; -import java.util.Set; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hive.ql.exec.Operator; -import org.apache.hadoop.hive.ql.lib.Node; import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx; import org.apache.hadoop.hive.ql.parse.RowResolver; import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; @@ -143,7 +140,7 @@ * @return converted expression for give node. If there is none then returns * null. */ - public ExprNodeDesc getConvertedNode(Node nd) { + public ExprNodeDesc getConvertedNode(ExprNodeDesc nd) { ExprInfo ei = exprInfoMap.get(nd); if (ei == null) { return null; @@ -238,13 +235,13 @@ * @param expr */ public void addFinalCandidate(ExprNodeDesc expr) { - String alias = getAlias(expr); - if (pushdownPreds.get(alias) == null) { - pushdownPreds.put(alias, new ArrayList()); - } - pushdownPreds.get(alias).add(expr); + addFinalCandidate(getAlias(expr), expr); } + public void addFinalCandidate(String alias, ExprNodeDesc expr) { + getPushdownPreds(alias).add(expr); + } + /** * Adds the passed list of pushDowns for the alias. 
*
@@ -252,10 +249,7 @@
 * @param pushDowns
 */
 public void addPushDowns(String alias, List pushDowns) {
- if (pushdownPreds.get(alias) == null) {
- pushdownPreds.put(alias, new ArrayList());
- }
- pushdownPreds.get(alias).addAll(pushDowns);
+ getPushdownPreds(alias).addAll(pushDowns);
 }
 /**
@@ -269,6 +263,26 @@
 return pushdownPreds; }
+ private List getPushdownPreds(String alias) {
+ List predicates = pushdownPreds.get(alias);
+ if (predicates == null) {
+ pushdownPreds.put(alias, predicates = new ArrayList());
+ }
+ return predicates;
+ }
+
+ public boolean hasAnyCandidates() {
+ if (pushdownPreds == null || pushdownPreds.isEmpty()) {
+ return false;
+ }
+ for (List exprs : pushdownPreds.values()) {
+ if (!exprs.isEmpty()) {
+ return true;
+ }
+ }
+ return false;
+ }
+
 /**
 * Adds the specified expr as a non-final candidate
 *
Index: ql/src/java/org/apache/hadoop/hive/ql/ppd/OpProcFactory.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/ppd/OpProcFactory.java (revision 1637277)
+++ ql/src/java/org/apache/hadoop/hive/ql/ppd/OpProcFactory.java (working copy)
@@ -31,7 +31,6 @@
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.ql.exec.ColumnInfo;
 import org.apache.hadoop.hive.ql.exec.FilterOperator;
 import org.apache.hadoop.hive.ql.exec.JoinOperator;
 import org.apache.hadoop.hive.ql.exec.LateralViewJoinOperator;
@@ -48,9 +47,6 @@
 import org.apache.hadoop.hive.ql.metadata.HiveStorageHandler;
 import org.apache.hadoop.hive.ql.metadata.HiveStoragePredicateHandler;
 import org.apache.hadoop.hive.ql.metadata.Table;
-import org.apache.hadoop.hive.ql.parse.ASTNode;
-import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer;
-import org.apache.hadoop.hive.ql.parse.HiveParser;
 import org.apache.hadoop.hive.ql.parse.OpParseContext;
 import org.apache.hadoop.hive.ql.parse.RowResolver;
 import org.apache.hadoop.hive.ql.parse.SemanticException;
@@ -342,14 +338,12 @@
 super.process(nd, stack, procCtx, nodeOutputs); OpWalkerInfo owi = (OpWalkerInfo) procCtx; ExprWalkerInfo prunedPred = owi.getPrunedPreds((Operator) nd);
- if (prunedPred == null) {
+ if (prunedPred == null || !prunedPred.hasAnyCandidates()) {
 return null; }
 Map> candidates = prunedPred.getFinalCandidates();
- if (candidates != null && !candidates.isEmpty()) {
- createFilter((Operator)nd, prunedPred, owi);
- candidates.clear();
- }
+ createFilter((Operator)nd, prunedPred, owi);
+ candidates.clear();
 return null; }
@@ -476,7 +470,12 @@
 Set toRemove = new HashSet();
 // we don't push down any expressions that refer to aliases that can't
 // be pushed down per getQualifiedAliases
- for (String key : prunePreds.getFinalCandidates().keySet()) {
+ for (Entry> entry : prunePreds.getFinalCandidates().entrySet()) {
+ String key = entry.getKey();
+ List value = entry.getValue();
+ if (key == null && ExprNodeDescUtils.isAllConstants(value)) {
+ continue; // propagate constants
+ }
 if (!aliases.contains(key)) { toRemove.add(key); }
@@ -517,200 +516,7 @@
 return getQualifiedAliases((JoinOperator) nd, owi.getRowResolver(nd)); }
- @Override
- protected Object handlePredicates(Node nd, ExprWalkerInfo prunePreds, OpWalkerInfo owi)
- throws SemanticException {
- if (HiveConf.getBoolVar(owi.getParseContext().getConf(),
- HiveConf.ConfVars.HIVEPPDRECOGNIZETRANSITIVITY)) {
- applyFilterTransitivity((JoinOperator) nd, owi);
- }
- return super.handlePredicates(nd, prunePreds, owi);
- }
- /**
- * Adds
additional pushdown predicates for a join operator by replicating - * filters transitively over all the equijoin conditions. - * - * If we have a predicate "t.col=1" and the equijoin conditions - * "t.col=s.col" and "t.col=u.col", we add the filters "s.col=1" and - * "u.col=1". Note that this does not depend on the types of joins (ie. - * inner, left/right/full outer) between the tables s, t and u because if - * a predicate, eg. "t.col=1" is present in getFinalCandidates() at this - * point, we have already verified that it can be pushed down, so any rows - * emitted must satisfy s.col=t.col=u.col=1 and replicating the filters - * like this is ok. - */ - private void applyFilterTransitivity(JoinOperator nd, OpWalkerInfo owi) - throws SemanticException { - ExprWalkerInfo prunePreds = - owi.getPrunedPreds(nd); - if (prunePreds != null) { - // We want to use the row resolvers of the parents of the join op - // because the rowresolver refers to the output columns of an operator - // and the filters at this point refer to the input columns of the join - // operator. - Map aliasToRR = - new HashMap(); - for (Operator o : (nd).getParentOperators()) { - for (String alias : owi.getRowResolver(o).getTableNames()){ - aliasToRR.put(alias, owi.getRowResolver(o)); - } - } - - // eqExpressions is a list of ArrayList's, one for each table - // in the join. Then for each i, j and k, the join condition is that - // eqExpressions[i][k]=eqExpressions[j][k] (*) (ie. the columns referenced - // by the corresponding ASTNodes are equal). For example, if the query - // was SELECT * FROM a join b on a.col=b.col and a.col2=b.col2 left - // outer join c on b.col=c.col and b.col2=c.col2 WHERE c.col=1, - // eqExpressions would be [[a.col1, a.col2], [b.col1, b.col2], - // [c.col1, c.col2]]. - // - // numEqualities is the number of equal columns in each equality - // "chain" and numColumns is the number of such chains. - // - // Note that (*) is guaranteed to be true for the - // join operator: if the equijoin condititions can't be expressed in - // these equal-length lists of equal columns (for example if we had the - // query SELECT * FROM a join b on a.col=b.col and a.col2=b.col2 left - // outer join c on b.col=c.col), more than one join operator is used. 
- ArrayList> eqExpressions = - owi.getParseContext().getJoinContext().get(nd).getExpressions(); - int numColumns = eqExpressions.size(); - int numEqualities = eqExpressions.get(0).size(); - - // oldFilters contains the filters to be pushed down - Map> oldFilters = - prunePreds.getFinalCandidates(); - Map> newFilters = - new HashMap>(); - - // We loop through for each chain of equalities - for (int i=0; i colsreferenced = - new HashSet(expr.getCols()); - if (colsreferenced.size() == 1 - && colsreferenced.contains(left.getInternalName())){ - ExprNodeDesc newexpr = expr.clone(); - // Replace the column reference in the filter - replaceColumnReference(newexpr, left.getInternalName(), - right.getInternalName()); - if (newFilters.get(right.getTabAlias()) == null) { - newFilters.put(right.getTabAlias(), - new ArrayList()); - } - newFilters.get(right.getTabAlias()).add(newexpr); - } - } - } - } - } - } - } - // Push where false filter transitively - Map> candidates = prunePreds.getNonFinalCandidates(); - List exprs; - // where false is not associated with any alias in candidates - if (null != candidates && candidates.get(null) != null && ((exprs = candidates.get(null)) != null)) { - Iterator itr = exprs.iterator(); - while (itr.hasNext()) { - ExprNodeDesc expr = itr.next(); - if (expr instanceof ExprNodeConstantDesc && Boolean.FALSE.equals(((ExprNodeConstantDesc)expr).getValue())) { - // push this 'where false' expr to all aliases - for (String alias : aliasToRR.keySet()) { - List pushedFilters = newFilters.get(alias); - if (null == pushedFilters) { - newFilters.put(alias, new ArrayList()); - - } - newFilters.get(alias).add(expr); - } - // this filter is pushed, we can remove it from non-final candidates. - itr.remove(); - } - } - } - for (Entry> aliasToFilters - : newFilters.entrySet()){ - owi.getPrunedPreds(nd) - .addPushDowns(aliasToFilters.getKey(), aliasToFilters.getValue()); - } - } - } - - /** - * Replaces the ColumnInfo for the column referred to by an ASTNode - * representing "table.column" or null if the ASTNode is not in that form - */ - private ColumnInfo getColumnInfoFromAST(ASTNode nd, - Map aliastoRR) throws SemanticException { - // this bit is messy since we are parsing an ASTNode at this point - if (nd.getType()==HiveParser.DOT) { - if (nd.getChildCount()==2) { - if (nd.getChild(0).getType()==HiveParser.TOK_TABLE_OR_COL - && nd.getChild(0).getChildCount()==1 - && nd.getChild(1).getType()==HiveParser.Identifier){ - // We unescape the identifiers and make them lower case--this - // really shouldn't be done here, but getExpressions gives us the - // raw ASTNodes. The same thing is done in SemanticAnalyzer. - // parseJoinCondPopulateAlias(). 
- String alias = BaseSemanticAnalyzer.unescapeIdentifier( - nd.getChild(0).getChild(0).getText().toLowerCase()); - String column = BaseSemanticAnalyzer.unescapeIdentifier( - nd.getChild(1).getText().toLowerCase()); - RowResolver rr=aliastoRR.get(alias); - if (rr == null) { - return null; - } - return rr.get(alias, column); - } - } - } - return null; - } - - /** - * Replaces all instances of oldColumn with newColumn in the - * ExprColumnDesc's of the ExprNodeDesc - */ - private void replaceColumnReference(ExprNodeDesc expr, - String oldColumn, String newColumn) { - if (expr instanceof ExprNodeColumnDesc) { - if (((ExprNodeColumnDesc) expr).getColumn().equals(oldColumn)){ - ((ExprNodeColumnDesc) expr).setColumn(newColumn); - } - } - - if (expr.getChildren() != null){ - for (ExprNodeDesc childexpr : expr.getChildren()) { - replaceColumnReference(childexpr, oldColumn, newColumn); - } - } - } - - /** * Figures out the aliases for whom it is safe to push predicates based on * ANSI SQL semantics. The join conditions are left associative so "a * RIGHT OUTER JOIN b LEFT OUTER JOIN c INNER JOIN d" is interpreted as @@ -760,6 +566,86 @@ } } + public static class ReduceSinkPPD extends DefaultPPD implements NodeProcessor { + public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, + Object... nodeOutputs) throws SemanticException { + super.process(nd, stack, procCtx, nodeOutputs); + Operator operator = (Operator) nd; + OpWalkerInfo owi = (OpWalkerInfo) procCtx; + if (operator.getNumChild() == 1 && + operator.getChildOperators().get(0) instanceof JoinOperator) { + if (HiveConf.getBoolVar(owi.getParseContext().getConf(), + HiveConf.ConfVars.HIVEPPDRECOGNIZETRANSITIVITY)) { + JoinOperator child = (JoinOperator) operator.getChildOperators().get(0); + int targetPos = child.getParentOperators().indexOf(operator); + applyFilterTransitivity(child, targetPos, owi); + } + } + return null; + } + + /** + * Adds additional pushdown predicates for a join operator by replicating + * filters transitively over all the equijoin conditions. + * + * If we have a predicate "t.col=1" and the equijoin conditions + * "t.col=s.col" and "t.col=u.col", we add the filters "s.col=1" and + * "u.col=1". Note that this does not depend on the types of joins (ie. + * inner, left/right/full outer) between the tables s, t and u because if + * a predicate, eg. "t.col=1" is present in getFinalCandidates() at this + * point, we have already verified that it can be pushed down, so any rows + * emitted must satisfy s.col=t.col=u.col=1 and replicating the filters + * like this is ok. 
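+ *
+ * A concrete (illustrative) walk-through: for
+ *   SELECT * FROM t JOIN s ON t.col = s.col WHERE t.col = 1
+ * the candidate t.col=1 registered on t's ReduceSinkOperator is backtracked
+ * through the join, matched against the equijoin key positions, rewritten
+ * over s's keys to s.col=1, and added as a final pushdown candidate on
+ * s's branch.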
+ */ + private void applyFilterTransitivity(JoinOperator join, int targetPos, OpWalkerInfo owi) + throws SemanticException { + + ExprWalkerInfo joinPreds = owi.getPrunedPreds(join); + if (joinPreds == null || !joinPreds.hasAnyCandidates()) { + return; + } + Map> oldFilters = joinPreds.getFinalCandidates(); + Map> newFilters = new HashMap>(); + + List> parentOperators = join.getParentOperators(); + + ReduceSinkOperator target = (ReduceSinkOperator) parentOperators.get(targetPos); + List targetKeys = target.getConf().getKeyCols(); + + ExprWalkerInfo rsPreds = owi.getPrunedPreds(target); + for (int sourcePos = 0; sourcePos < parentOperators.size(); sourcePos++) { + ReduceSinkOperator source = (ReduceSinkOperator) parentOperators.get(sourcePos); + List sourceKeys = source.getConf().getKeyCols(); + Set sourceAliases = new HashSet(Arrays.asList(source.getInputAliases())); + for (Map.Entry> entry : oldFilters.entrySet()) { + if (entry.getKey() == null && ExprNodeDescUtils.isAllConstants(entry.getValue())) { + // propagate constants + for (String targetAlias : target.getInputAliases()) { + rsPreds.addPushDowns(targetAlias, entry.getValue()); + } + continue; + } + if (!sourceAliases.contains(entry.getKey())) { + continue; + } + for (ExprNodeDesc predicate : entry.getValue()) { + ExprNodeDesc backtrack = ExprNodeDescUtils.backtrack(predicate, join, source); + if (backtrack == null) { + continue; + } + ExprNodeDesc replaced = ExprNodeDescUtils.replace(backtrack, sourceKeys, targetKeys); + if (replaced == null) { + continue; + } + for (String targetAlias : target.getInputAliases()) { + rsPreds.addFinalCandidate(targetAlias, replaced); + } + } + } + } + } + } + /** * Default processor which just merges its children. */ @@ -900,11 +786,10 @@ protected static Object createFilter(Operator op, ExprWalkerInfo pushDownPreds, OpWalkerInfo owi) { - if (pushDownPreds == null || pushDownPreds.getFinalCandidates() == null - || pushDownPreds.getFinalCandidates().size() == 0) { - return null; + if (pushDownPreds != null && pushDownPreds.hasAnyCandidates()) { + return createFilter(op, pushDownPreds.getFinalCandidates(), owi); } - return createFilter(op, pushDownPreds.getFinalCandidates(), owi); + return null; } protected static Object createFilter(Operator op, @@ -1113,6 +998,10 @@ return new JoinerPPD(); } + public static NodeProcessor getRSProc() { + return new ReduceSinkPPD(); + } + private OpProcFactory() { // prevent instantiation } Index: ql/src/java/org/apache/hadoop/hive/ql/ppd/PredicatePushDown.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/ppd/PredicatePushDown.java (revision 1637277) +++ ql/src/java/org/apache/hadoop/hive/ql/ppd/PredicatePushDown.java (working copy) @@ -30,6 +30,7 @@ import org.apache.hadoop.hive.ql.exec.LimitOperator; import org.apache.hadoop.hive.ql.exec.Operator; import org.apache.hadoop.hive.ql.exec.PTFOperator; +import org.apache.hadoop.hive.ql.exec.ReduceSinkOperator; import org.apache.hadoop.hive.ql.exec.ScriptOperator; import org.apache.hadoop.hive.ql.exec.TableScanOperator; import org.apache.hadoop.hive.ql.exec.UDTFOperator; @@ -118,6 +119,9 @@ opRules.put(new RuleRegExp("R9", LateralViewJoinOperator.getOperatorName() + "%"), OpProcFactory.getLVJProc()); + opRules.put(new RuleRegExp("R10", + ReduceSinkOperator.getOperatorName() + "%"), + OpProcFactory.getRSProc()); // The dispatcher fires the processor corresponding to the closest matching // rule and passes the context along Index: 
ql/src/java/org/apache/hadoop/hive/ql/ppd/PredicateTransitivePropagate.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/ppd/PredicateTransitivePropagate.java (revision 1637277) +++ ql/src/java/org/apache/hadoop/hive/ql/ppd/PredicateTransitivePropagate.java (working copy) @@ -30,7 +30,6 @@ import org.apache.hadoop.hive.ql.exec.CommonJoinOperator; import org.apache.hadoop.hive.ql.exec.FilterOperator; import org.apache.hadoop.hive.ql.exec.JoinOperator; -import org.apache.hadoop.hive.ql.exec.MapJoinOperator; import org.apache.hadoop.hive.ql.exec.Operator; import org.apache.hadoop.hive.ql.exec.OperatorFactory; import org.apache.hadoop.hive.ql.exec.ReduceSinkOperator; @@ -179,40 +178,6 @@ return null; } - // calculate filter propagation directions for each alias - // L<->R for innner/semi join, L->R for left outer join, R->L for right outer join - private int[][] getTargets(CommonJoinOperator join) { - JoinCondDesc[] conds = join.getConf().getConds(); - - int aliases = conds.length + 1; - Vectors vector = new Vectors(aliases); - for (JoinCondDesc cond : conds) { - int left = cond.getLeft(); - int right = cond.getRight(); - switch (cond.getType()) { - case JoinDesc.INNER_JOIN: - case JoinDesc.LEFT_SEMI_JOIN: - vector.add(left, right); - vector.add(right, left); - break; - case JoinDesc.LEFT_OUTER_JOIN: - vector.add(left, right); - break; - case JoinDesc.RIGHT_OUTER_JOIN: - vector.add(right, left); - break; - case JoinDesc.FULL_OUTER_JOIN: - break; - } - } - int[][] result = new int[aliases][]; - for (int pos = 0 ; pos < aliases; pos++) { - // find all targets recursively - result[pos] = vector.traverse(pos); - } - return result; - } - // check same filter exists already private boolean filterExists(ReduceSinkOperator target, ExprNodeDesc replaced) { Operator operator = target.getParentOperators().get(0); @@ -226,6 +191,40 @@ } } + // calculate filter propagation directions for each alias + // L<->R for inner/semi join, L->R for left outer join, R->L for right outer join + public static int[][] getTargets(CommonJoinOperator join) { + JoinCondDesc[] conds = join.getConf().getConds(); + + int aliases = conds.length + 1; + Vectors vector = new Vectors(aliases); + for (JoinCondDesc cond : conds) { + int left = cond.getLeft(); + int right = cond.getRight(); + switch (cond.getType()) { + case JoinDesc.INNER_JOIN: + case JoinDesc.LEFT_SEMI_JOIN: + vector.add(left, right); + vector.add(right, left); + break; + case JoinDesc.LEFT_OUTER_JOIN: + vector.add(left, right); + break; + case JoinDesc.RIGHT_OUTER_JOIN: + vector.add(right, left); + break; + case JoinDesc.FULL_OUTER_JOIN: + break; + } + } + int[][] result = new int[aliases][]; + for (int pos = 0 ; pos < aliases; pos++) { + // find all targets recursively + result[pos] = vector.traverse(pos); + } + return result; + } + private static class Vectors { private Set[] vector; @@ -245,10 +244,11 @@ public int[] traverse(int pos) { Set targets = new HashSet(); traverse(targets, pos); - return toArray(targets); + return toArray(targets, pos); } - private int[] toArray(Set values) { + private int[] toArray(Set values, int pos) { + values.remove(pos); int index = 0; int[] result = new int[values.size()]; for (int value : values) { Index: ql/src/java/org/apache/hadoop/hive/ql/security/ProxyUserAuthenticator.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/security/ProxyUserAuthenticator.java (revision 1637277) +++ 
ql/src/java/org/apache/hadoop/hive/ql/security/ProxyUserAuthenticator.java (working copy) @@ -30,6 +30,8 @@ * but honours a proxy config setting proxy.user.name instead of the * current user if set. This allows server processes like webhcat which * proxy other users to easily specify an override if allowed. + * + * It is no longer necessary to use this class with WebHCat as of Hive 0.14. */ public class ProxyUserAuthenticator extends HadoopDefaultAuthenticator { Index: ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java (revision 1637277) +++ ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java (working copy) @@ -809,6 +809,14 @@ return (ss != null) ? ss.getIsSilent() : isSilent; } + public void logInfo(String info) { + logInfo(info, null); + } + + public void logInfo(String info, String detail) { + LOG.info(info + StringUtils.defaultString(detail)); + } + public void printInfo(String info) { printInfo(info, null); } Index: ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java (revision 1637277) +++ ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java (working copy) @@ -18,14 +18,10 @@ package org.apache.hadoop.hive.ql.stats; -import java.math.BigDecimal; -import java.math.BigInteger; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; +import com.google.common.base.Joiner; +import com.google.common.collect.Lists; +import com.google.common.math.DoubleMath; +import com.google.common.math.LongMath; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -42,6 +38,7 @@ import org.apache.hadoop.hive.ql.exec.ColumnInfo; import org.apache.hadoop.hive.ql.exec.RowSchema; import org.apache.hadoop.hive.ql.exec.TableScanOperator; +import org.apache.hadoop.hive.ql.exec.Utilities; import org.apache.hadoop.hive.ql.metadata.Hive; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.metadata.Partition; @@ -89,8 +86,14 @@ import org.apache.hadoop.io.BytesWritable; import org.apache.tez.mapreduce.hadoop.MRJobConfig; -import com.google.common.base.Joiner; -import com.google.common.collect.Lists; +import java.math.BigDecimal; +import java.math.BigInteger; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; public class StatsUtils { @@ -389,7 +392,7 @@ } if (s <= 0 && rc > 0) { - s = rc * avgRowSize; + s = safeMult(rc, avgRowSize); dataSizes.set(i, s); } } @@ -494,7 +497,7 @@ long result = 0; for (Long l : vals) { if (l > 0) { - result += l; + result = safeAdd(result, l); } } return result; @@ -1011,12 +1014,10 @@ if (colExprMap != null && rowSchema != null) { for (ColumnInfo ci : rowSchema.getSignature()) { String outColName = ci.getInternalName(); - outColName = StatsUtils.stripPrefixFromColumnName(outColName); String outTabAlias = ci.getTabAlias(); ExprNodeDesc end = colExprMap.get(outColName); ColStatistics colStat = getColStatisticsFromExpression(conf, parentStats, end); if (colStat != null) { - outColName = StatsUtils.stripPrefixFromColumnName(outColName); colStat.setColumnName(outColName); colStat.setTableAlias(outTabAlias); } @@ -1070,7 
+1071,6 @@ ExprNodeColumnDesc encd = (ExprNodeColumnDesc) end; colName = encd.getColumn(); tabAlias = encd.getTabAlias(); - colName = stripPrefixFromColumnName(colName); if (encd.getIsPartitionColOrVirtualCol()) { @@ -1261,6 +1261,7 @@ if (cs != null) { String colType = cs.getColumnType(); long nonNullCount = numRows - cs.getNumNulls(); + double sizeOf = 0; if (colType.equalsIgnoreCase(serdeConstants.TINYINT_TYPE_NAME) || colType.equalsIgnoreCase(serdeConstants.SMALLINT_TYPE_NAME) || colType.equalsIgnoreCase(serdeConstants.INT_TYPE_NAME) @@ -1268,31 +1269,25 @@ || colType.equalsIgnoreCase(serdeConstants.BOOLEAN_TYPE_NAME) || colType.equalsIgnoreCase(serdeConstants.FLOAT_TYPE_NAME) || colType.equalsIgnoreCase(serdeConstants.DOUBLE_TYPE_NAME)) { - - result += nonNullCount * cs.getAvgColLen(); + sizeOf = cs.getAvgColLen(); } else if (colType.equalsIgnoreCase(serdeConstants.STRING_TYPE_NAME) || colType.startsWith(serdeConstants.VARCHAR_TYPE_NAME) || colType.startsWith(serdeConstants.CHAR_TYPE_NAME)) { - int acl = (int) Math.round(cs.getAvgColLen()); - result += nonNullCount * JavaDataModel.get().lengthForStringOfLength(acl); + sizeOf = JavaDataModel.get().lengthForStringOfLength(acl); } else if (colType.equalsIgnoreCase(serdeConstants.BINARY_TYPE_NAME)) { - int acl = (int) Math.round(cs.getAvgColLen()); - result += nonNullCount * JavaDataModel.get().lengthForByteArrayOfSize(acl); + sizeOf = JavaDataModel.get().lengthForByteArrayOfSize(acl); } else if (colType.equalsIgnoreCase(serdeConstants.TIMESTAMP_TYPE_NAME)) { - - result += nonNullCount * JavaDataModel.get().lengthOfTimestamp(); + sizeOf = JavaDataModel.get().lengthOfTimestamp(); } else if (colType.startsWith(serdeConstants.DECIMAL_TYPE_NAME)) { - - result += nonNullCount * JavaDataModel.get().lengthOfDecimal(); + sizeOf = JavaDataModel.get().lengthOfDecimal(); } else if (colType.equalsIgnoreCase(serdeConstants.DATE_TYPE_NAME)) { - - result += nonNullCount * JavaDataModel.get().lengthOfDate(); + sizeOf = JavaDataModel.get().lengthOfDate(); } else { - - result += nonNullCount * cs.getAvgColLen(); + sizeOf = cs.getAvgColLen(); } + result = safeAdd(result, safeMult(nonNullCount, sizeOf)); } } @@ -1300,21 +1295,6 @@ } /** - * Remove KEY/VALUE prefix from column name - * @param colName - * - column name - * @return column name - */ - public static String stripPrefixFromColumnName(String colName) { - String stripedName = colName; - if (colName.startsWith("KEY") || colName.startsWith("VALUE")) { - // strip off KEY./VALUE. 
from column name
- stripedName = colName.split("\\.")[1];
- }
- return stripedName;
- }
-
- /**
 * Returns fully qualified name of column
 * @param tabName
 * @param colName
@@ -1363,38 +1343,42 @@
 }
 /**
- * Try to get fully qualified column name from expression node
+ * Get fully qualified column name from output key column names and column expression map
 * @param keyExprs
- * - expression nodes
+ * - output key names
 * @param map
 * - column expression map
 * @return list of fully qualified names
 */
- public static List getFullQualifedColNameFromExprs(List keyExprs,
+ public static List getFullyQualifedReducerKeyNames(List keyExprs,
 Map map) {
 List result = Lists.newArrayList();
 if (keyExprs != null) {
- for (ExprNodeDesc end : keyExprs) {
- String outColName = null;
- for (Map.Entry entry : map.entrySet()) {
- if (entry.getValue().isSame(end)) {
- outColName = entry.getKey();
- outColName = stripPrefixFromColumnName(outColName);
+ for (String key : keyExprs) {
+ String colName = key;
+ ExprNodeDesc end = map.get(colName);
+ // if we couldn't get the expression, try prepending the "KEY." prefix used for reducer key columns
+ if (end == null) {
+ colName = Utilities.ReduceField.KEY.toString() + "." + key;
+ end = map.get(colName);
+ if (end == null) {
+ continue;
+ }
 }
 }
 if (end instanceof ExprNodeColumnDesc) {
 ExprNodeColumnDesc encd = (ExprNodeColumnDesc) end;
- if (outColName == null) {
- outColName = encd.getColumn();
- outColName = stripPrefixFromColumnName(outColName);
- }
 String tabAlias = encd.getTabAlias();
- result.add(getFullyQualifiedColumnName(tabAlias, outColName));
+ result.add(getFullyQualifiedColumnName(tabAlias, colName));
 } else if (end instanceof ExprNodeGenericFuncDesc) {
 ExprNodeGenericFuncDesc enf = (ExprNodeGenericFuncDesc) end;
- List cols = getFullQualifedColNameFromExprs(enf.getChildren(), map);
- String joinedStr = Joiner.on(".").skipNulls().join(cols);
- result.add(joinedStr);
+ String tabAlias = "";
+ for (ExprNodeDesc childEnd : enf.getChildren()) {
+ if (childEnd instanceof ExprNodeColumnDesc) {
+ tabAlias = ((ExprNodeColumnDesc) childEnd).getTabAlias();
+ break;
+ }
+ }
+ result.add(getFullyQualifiedColumnName(tabAlias, colName));
 } else if (end instanceof ExprNodeConstantDesc) {
 ExprNodeConstantDesc encd = (ExprNodeConstantDesc) end;
 result.add(encd.getValue().toString());
@@ -1439,4 +1423,38 @@
 conf.getInt(MRJobConfig.MAP_MEMORY_MB, MRJobConfig.DEFAULT_MAP_MEMORY_MB);
 return memory;
 }
+
+ /**
+ * Negative numbers of rows or data sizes are invalid. They may be the result of
+ * long overflow, in which case Long.MAX_VALUE is returned
+ * @param val - input value
+ * @return Long.MAX_VALUE if val is negative else val
+ */
+ public static long getMaxIfOverflow(long val) {
+ return val < 0 ? Long.MAX_VALUE : val;
+ }
+
+ /** Bounded multiplication - overflows become MAX_VALUE */
+ public static long safeMult(long a, double b) {
+ double result = a * b;
+ return (result > Long.MAX_VALUE) ?
Long.MAX_VALUE : (long)result; + } + + /** Bounded addition - overflows become MAX_VALUE */ + public static long safeAdd(long a, long b) { + try { + return LongMath.checkedAdd(a, b); + } catch (ArithmeticException ex) { + return Long.MAX_VALUE; + } + } + + /** Bounded multiplication - overflows become MAX_VALUE */ + public static long safeMult(long a, long b) { + try { + return LongMath.checkedMultiply(a, b); + } catch (ArithmeticException ex) { + return Long.MAX_VALUE; + } + } } Index: ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFStreamingEvaluator.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFStreamingEvaluator.java (revision 1637277) +++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFStreamingEvaluator.java (working copy) @@ -67,7 +67,7 @@ @Override public ObjectInspector init(Mode m, ObjectInspector[] parameters) throws HiveException { - throw new HiveException(getClass().getSimpleName() + ": init not supported"); + return wrappedEval.init(m, parameters); } @Override Index: ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFBaseNumeric.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFBaseNumeric.java (revision 1637277) +++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFBaseNumeric.java (working copy) @@ -294,7 +294,7 @@ @Override public String getDisplayString(String[] children) { - assert (children.length == 2); + assert (children.length == 2) : opDisplayName + " with " + children.length + " children"; return "(" + children[0] + " " + opDisplayName + " " + children[1] + ")"; } Index: ql/src/java/org/apache/hadoop/hive/ql/util/ZooKeeperHiveHelper.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/util/ZooKeeperHiveHelper.java (revision 1637277) +++ ql/src/java/org/apache/hadoop/hive/ql/util/ZooKeeperHiveHelper.java (working copy) @@ -18,17 +18,10 @@ package org.apache.hadoop.hive.ql.util; -import java.util.List; - -import org.apache.commons.lang.StringUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.zookeeper.CreateMode; -import org.apache.zookeeper.KeeperException; import org.apache.zookeeper.Watcher; -import org.apache.zookeeper.ZooKeeper; -import org.apache.zookeeper.data.ACL; public class ZooKeeperHiveHelper { public static final Log LOG = LogFactory.getLog(ZooKeeperHiveHelper.class.getName()); @@ -59,35 +52,7 @@ return quorum.toString(); } - /** - * Create a path on ZooKeeper, if it does not already exist ("mkdir -p") - * - * @param zooKeeperClient ZooKeeper session - * @param path string with ZOOKEEPER_PATH_SEPARATOR as the separator - * @param acl list of ACL entries - * @param createMode for create mode of each node in the patch - * @return - * @throws KeeperException - * @throws InterruptedException - */ - public static String createPathRecursively(ZooKeeper zooKeeperClient, String path, List acl, - CreateMode createMode) throws KeeperException, InterruptedException { - String[] pathComponents = StringUtils.splitByWholeSeparator(path, ZOOKEEPER_PATH_SEPARATOR); - String currentPath = ""; - for (String pathComponent : pathComponents) { - currentPath += ZOOKEEPER_PATH_SEPARATOR + pathComponent; - try { - String node = zooKeeperClient.create(currentPath, new byte[0], acl, createMode); 
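Returning to the StatsUtils hunk above: the new safeAdd/safeMult helpers clamp arithmetic overflow to Long.MAX_VALUE instead of letting it wrap negative, which is what previously produced invalid row counts and data sizes. A minimal, self-contained sketch of the clamping behavior; the class and method layout are illustrative, while LongMath is the Guava API the patch itself uses:

import com.google.common.math.LongMath;

public class SafeMathSketch {
  // mirrors StatsUtils.safeAdd from the hunk above: overflow becomes Long.MAX_VALUE
  static long safeAdd(long a, long b) {
    try {
      return LongMath.checkedAdd(a, b);
    } catch (ArithmeticException ex) {
      return Long.MAX_VALUE;
    }
  }

  public static void main(String[] args) {
    // plain addition wraps to a large negative value ...
    System.out.println(Long.MAX_VALUE + 1L);         // -9223372036854775808
    // ... while the bounded helper saturates instead
    System.out.println(safeAdd(Long.MAX_VALUE, 1L)); // 9223372036854775807
  }
}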
- LOG.info("Created path: " + node); - } catch (KeeperException.NodeExistsException e) { - // Do nothing here - } - } - return currentPath; - } - - /** * A no-op watcher class */ public static class DummyWatcher implements Watcher { @@ -95,5 +60,4 @@ public void process(org.apache.zookeeper.WatchedEvent event) { } } - } Index: ql/src/test/org/apache/hadoop/hive/ql/exec/vector/TestVectorFilterOperator.java =================================================================== --- ql/src/test/org/apache/hadoop/hive/ql/exec/vector/TestVectorFilterOperator.java (revision 1637277) +++ ql/src/test/org/apache/hadoop/hive/ql/exec/vector/TestVectorFilterOperator.java (working copy) @@ -106,7 +106,7 @@ VectorizedRowBatch vrg = fdr.getNext(); - vfo.processOp(vrg, 0); + vfo.getConditionEvaluator().evaluate(vrg); //Verify int rows = 0; Index: ql/src/test/org/apache/hadoop/hive/ql/exec/vector/TestVectorGroupByOperator.java =================================================================== --- ql/src/test/org/apache/hadoop/hive/ql/exec/vector/TestVectorGroupByOperator.java (revision 1637277) +++ ql/src/test/org/apache/hadoop/hive/ql/exec/vector/TestVectorGroupByOperator.java (working copy) @@ -38,7 +38,6 @@ import java.util.Map; import java.util.Set; -import org.apache.hadoop.hive.common.type.Decimal128; import org.apache.hadoop.hive.common.type.HiveDecimal; import org.apache.hadoop.hive.ql.exec.vector.util.FakeCaptureOutputOperator; import org.apache.hadoop.hive.ql.exec.vector.util.FakeVectorRowBatchFromConcat; @@ -632,9 +631,9 @@ "count", 2, Arrays.asList(new Object[]{ - new Decimal128(1), - new Decimal128(2), - new Decimal128(3)}), + HiveDecimal.create(1), + HiveDecimal.create(2), + HiveDecimal.create(3)}), 3L); } @@ -645,28 +644,28 @@ "max", 2, Arrays.asList(new Object[]{ - new Decimal128(1), - new Decimal128(2), - new Decimal128(3)}), - new Decimal128(3)); + HiveDecimal.create(1), + HiveDecimal.create(2), + HiveDecimal.create(3)}), + HiveDecimal.create(3)); testAggregateDecimal( "Decimal", "max", 2, Arrays.asList(new Object[]{ - new Decimal128(3), - new Decimal128(2), - new Decimal128(1)}), - new Decimal128(3)); + HiveDecimal.create(3), + HiveDecimal.create(2), + HiveDecimal.create(1)}), + HiveDecimal.create(3)); testAggregateDecimal( "Decimal", "max", 2, Arrays.asList(new Object[]{ - new Decimal128(2), - new Decimal128(3), - new Decimal128(1)}), - new Decimal128(3)); + HiveDecimal.create(2), + HiveDecimal.create(3), + HiveDecimal.create(1)}), + HiveDecimal.create(3)); } @Test @@ -676,29 +675,29 @@ "min", 2, Arrays.asList(new Object[]{ - new Decimal128(1), - new Decimal128(2), - new Decimal128(3)}), - new Decimal128(1)); + HiveDecimal.create(1), + HiveDecimal.create(2), + HiveDecimal.create(3)}), + HiveDecimal.create(1)); testAggregateDecimal( "Decimal", "min", 2, Arrays.asList(new Object[]{ - new Decimal128(3), - new Decimal128(2), - new Decimal128(1)}), - new Decimal128(1)); + HiveDecimal.create(3), + HiveDecimal.create(2), + HiveDecimal.create(1)}), + HiveDecimal.create(1)); testAggregateDecimal( "Decimal", "min", 2, Arrays.asList(new Object[]{ - new Decimal128(2), - new Decimal128(1), - new Decimal128(3)}), - new Decimal128(1)); + HiveDecimal.create(2), + HiveDecimal.create(1), + HiveDecimal.create(3)}), + HiveDecimal.create(1)); } @Test @@ -708,10 +707,10 @@ "sum", 2, Arrays.asList(new Object[]{ - new Decimal128(1), - new Decimal128(2), - new Decimal128(3)}), - new Decimal128(1+2+3)); + HiveDecimal.create(1), + HiveDecimal.create(2), + HiveDecimal.create(3)}), + HiveDecimal.create(1+2+3)); } 
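The test migrations above and below replace the old fixed-width Decimal128 values with HiveDecimal, which the vectorized decimal paths now carry end to end. A minimal sketch of the replacement construction and comparison semantics, using only factory methods that appear in this patch (the class name is illustrative):

import org.apache.hadoop.hive.common.type.HiveDecimal;

public class HiveDecimalSketch {
  public static void main(String[] args) {
    // the string factory preserves the literal's scale (the tests assert scale() == 5)
    HiveDecimal d = HiveDecimal.create("23.00000");
    // the long factory yields scale 0
    HiveDecimal e = HiveDecimal.create(23);
    // the tests compare with compareTo(...) == 0 rather than equals(),
    // so a scale difference alone does not make two values unequal
    System.out.println(d.compareTo(e) == 0); // true
    System.out.println(HiveDecimal.create("19.56778").compareTo(HiveDecimal.create(20)) < 0); // true
  }
}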
@Test @@ -722,12 +721,12 @@ "sum", 4, Arrays.asList(new Object[]{ - new Decimal128("1234.2401", scale), - new Decimal128("1868.52", scale), - new Decimal128(0L, (short) 0), - new Decimal128("456.84", scale), - new Decimal128("121.89", scale)}), - new Decimal128("3681.4901", scale)); + HiveDecimal.create("1234.2401").setScale(scale), + HiveDecimal.create("1868.52").setScale(scale), + HiveDecimal.ZERO.setScale(scale), + HiveDecimal.create("456.84").setScale(scale), + HiveDecimal.create("121.89").setScale(scale)}), + HiveDecimal.create("3681.4901").setScale( scale)); } @Test @@ -737,9 +736,9 @@ "avg", 2, Arrays.asList(new Object[]{ - new Decimal128(1), - new Decimal128(2), - new Decimal128(3)}), + HiveDecimal.create(1), + HiveDecimal.create(2), + HiveDecimal.create(3)}), HiveDecimal.create((1+2+3)/3)); } @@ -750,9 +749,9 @@ "avg", 2, Arrays.asList(new Object[]{ - new Decimal128(-1), - new Decimal128(-2), - new Decimal128(-3)}), + HiveDecimal.create(-1), + HiveDecimal.create(-2), + HiveDecimal.create(-3)}), HiveDecimal.create((-1-2-3)/3)); } @@ -763,10 +762,10 @@ "variance", 2, Arrays.asList(new Object[]{ - new Decimal128(13), - new Decimal128(5), - new Decimal128(7), - new Decimal128(19)}), + HiveDecimal.create(13), + HiveDecimal.create(5), + HiveDecimal.create(7), + HiveDecimal.create(19)}), (double) 30); } @@ -777,10 +776,10 @@ "var_samp", 2, Arrays.asList(new Object[]{ - new Decimal128(13), - new Decimal128(5), - new Decimal128(7), - new Decimal128(19)}), + HiveDecimal.create(13), + HiveDecimal.create(5), + HiveDecimal.create(7), + HiveDecimal.create(19)}), (double) 40); } @@ -791,10 +790,10 @@ "stddev_pop", 2, Arrays.asList(new Object[]{ - new Decimal128(13), - new Decimal128(5), - new Decimal128(7), - new Decimal128(19)}), + HiveDecimal.create(13), + HiveDecimal.create(5), + HiveDecimal.create(7), + HiveDecimal.create(19)}), (double) Math.sqrt(30)); } @@ -805,10 +804,10 @@ "stddev_samp", 2, Arrays.asList(new Object[]{ - new Decimal128(13), - new Decimal128(5), - new Decimal128(7), - new Decimal128(19)}), + HiveDecimal.create(13), + HiveDecimal.create(5), + HiveDecimal.create(7), + HiveDecimal.create(19)}), (double) Math.sqrt(40)); } @@ -820,8 +819,8 @@ 2, new String[] {"decimal(38,0)", "bigint"}, Arrays.asList(new Object[]{ - new Decimal128(1),null, - new Decimal128(1), null}), + HiveDecimal.create(1),null, + HiveDecimal.create(1), null}), Arrays.asList(new Object[]{13L,null,7L, 19L})), buildHashMap(HiveDecimal.create(1), 20L, null, 19L)); } @@ -2095,12 +2094,12 @@ } else if (arr[0] instanceof HiveDecimalWritable) { HiveDecimalWritable hdw = (HiveDecimalWritable) arr[0]; HiveDecimal hd = hdw.getHiveDecimal(); - Decimal128 d128 = (Decimal128)expected; - assertEquals (key, d128.toBigDecimal(), hd.bigDecimalValue()); + HiveDecimal expectedDec = (HiveDecimal)expected; + assertEquals (key, expectedDec, hd); } else if (arr[0] instanceof HiveDecimal) { HiveDecimal hd = (HiveDecimal) arr[0]; - Decimal128 d128 = (Decimal128)expected; - assertEquals (key, d128.toBigDecimal(), hd.bigDecimalValue()); + HiveDecimal expectedDec = (HiveDecimal)expected; + assertEquals (key, expectedDec, hd); } else { Assert.fail("Unsupported result type: " + arr[0].getClass().getName()); } Index: ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestConstantVectorExpression.java =================================================================== --- ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestConstantVectorExpression.java (revision 1637277) +++ 
ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestConstantVectorExpression.java (working copy) @@ -23,7 +23,7 @@ import java.util.Arrays; -import org.apache.hadoop.hive.common.type.Decimal128; +import org.apache.hadoop.hive.common.type.HiveDecimal; import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector; import org.apache.hadoop.hive.ql.exec.vector.DecimalColumnVector; import org.apache.hadoop.hive.ql.exec.vector.DoubleColumnVector; @@ -43,7 +43,7 @@ ConstantVectorExpression doubleCve = new ConstantVectorExpression(1, 17.34); String str = "alpha"; ConstantVectorExpression bytesCve = new ConstantVectorExpression(2, str.getBytes()); - Decimal128 decVal = new Decimal128(25.8, (short) 1); + HiveDecimal decVal = HiveDecimal.create("25.8"); ConstantVectorExpression decimalCve = new ConstantVectorExpression(3, decVal); ConstantVectorExpression nullCve = new ConstantVectorExpression(4, "string", true); @@ -85,12 +85,12 @@ assertTrue(bcv.length[0] == alphaBytes.length); assertTrue(sameFirstKBytes(alphaBytes, bcv.vector[0], alphaBytes.length)); - assertTrue(25.8 == dv.vector[0].doubleValue()); + assertTrue(25.8 == dv.vector[0].getHiveDecimal().doubleValue()); // Evaluation of the decimal Constant Vector Expression after the vector is // modified. - ((DecimalColumnVector) (vrg.cols[3])).vector[0] = new Decimal128(39.7, (short) 1); + ((DecimalColumnVector) (vrg.cols[3])).vector[0].set(HiveDecimal.create("39.7")); decimalCve.evaluate(vrg); - assertTrue(25.8 == dv.vector[0].doubleValue()); + assertTrue(25.8 == dv.vector[0].getHiveDecimal().doubleValue()); } private boolean sameFirstKBytes(byte[] o1, byte[] o2, int k) { Index: ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestDecimalUtil.java =================================================================== --- ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestDecimalUtil.java (revision 1637277) +++ ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestDecimalUtil.java (working copy) @@ -19,7 +19,8 @@ package org.apache.hadoop.hive.ql.exec.vector.expressions; import junit.framework.Assert; -import org.apache.hadoop.hive.common.type.Decimal128; + +import org.apache.hadoop.hive.common.type.HiveDecimal; import org.apache.hadoop.hive.ql.exec.vector.DecimalColumnVector; import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector; import org.junit.Test; @@ -32,183 +33,198 @@ @Test public void testFloor() { DecimalColumnVector dcv = new DecimalColumnVector(4 ,20, 13); - Decimal128 d1 = new Decimal128(19.56778, (short) 5); - Decimal128 expected1 = new Decimal128(19, (short)0); + HiveDecimal d1 = HiveDecimal.create("19.56778"); + HiveDecimal expected1 = HiveDecimal.create("19"); DecimalUtil.floor(0, d1, dcv); - Assert.assertEquals(0, expected1.compareTo(dcv.vector[0])); + Assert.assertEquals(0, expected1.compareTo(dcv.vector[0].getHiveDecimal())); - Decimal128 d2 = new Decimal128(23.0, (short) 5); - Decimal128 expected2 = new Decimal128(23, (short)0); + HiveDecimal d2 = HiveDecimal.create("23.00000"); + Assert.assertEquals(5, d2.scale()); + HiveDecimal expected2 = HiveDecimal.create("23"); DecimalUtil.floor(0, d2, dcv); - Assert.assertEquals(0, expected2.compareTo(dcv.vector[0])); + Assert.assertEquals(0, expected2.compareTo(dcv.vector[0].getHiveDecimal())); - Decimal128 d3 = new Decimal128(-25.34567, (short) 5); - Decimal128 expected3 = new Decimal128(-26, (short)0); + HiveDecimal d3 = HiveDecimal.create("-25.34567"); + HiveDecimal expected3 = HiveDecimal.create("-26"); DecimalUtil.floor(0, 
d3, dcv); - Assert.assertEquals(0, expected3.compareTo(dcv.vector[0])); + Assert.assertEquals(0, expected3.compareTo(dcv.vector[0].getHiveDecimal())); - Decimal128 d4 = new Decimal128(-17, (short) 5); - Decimal128 expected4 = new Decimal128(-17, (short)0); + HiveDecimal d4 = HiveDecimal.create("-17.00000"); + Assert.assertEquals(5, d4.scale()); + HiveDecimal expected4 = HiveDecimal.create("-17"); DecimalUtil.floor(0, d4, dcv); - Assert.assertEquals(0, expected4.compareTo(dcv.vector[0])); + Assert.assertEquals(0, expected4.compareTo(dcv.vector[0].getHiveDecimal())); - Decimal128 d5 = new Decimal128(-0.3, (short) 5); - Decimal128 expected5 = new Decimal128(-1, (short)0); + HiveDecimal d5 = HiveDecimal.create("-0.30000"); + Assert.assertEquals(5, d5.scale()); + HiveDecimal expected5 = HiveDecimal.create("-1"); DecimalUtil.floor(0, d5, dcv); - Assert.assertEquals(0, expected5.compareTo(dcv.vector[0])); + Assert.assertEquals(0, expected5.compareTo(dcv.vector[0].getHiveDecimal())); - Decimal128 d6 = new Decimal128(0.3, (short) 5); - Decimal128 expected6 = new Decimal128(0, (short)0); + HiveDecimal d6 = HiveDecimal.create("0.30000"); + Assert.assertEquals(5, d6.scale()); + HiveDecimal expected6 = HiveDecimal.create("0"); DecimalUtil.floor(0, d6, dcv); - Assert.assertEquals(0, expected6.compareTo(dcv.vector[0])); + Assert.assertEquals(0, expected6.compareTo(dcv.vector[0].getHiveDecimal())); } @Test public void testCeiling() { DecimalColumnVector dcv = new DecimalColumnVector(4 ,20, 13); - Decimal128 d1 = new Decimal128(19.56778, (short) 5); - Decimal128 expected1 = new Decimal128(20, (short)0); + HiveDecimal d1 = HiveDecimal.create("19.56778"); + HiveDecimal expected1 = HiveDecimal.create("20"); DecimalUtil.ceiling(0, d1, dcv); - Assert.assertEquals(0, expected1.compareTo(dcv.vector[0])); + Assert.assertEquals(0, expected1.compareTo(dcv.vector[0].getHiveDecimal())); - Decimal128 d2 = new Decimal128(23.0, (short) 5); - Decimal128 expected2 = new Decimal128(23, (short)0); + HiveDecimal d2 = HiveDecimal.create("23.00000"); + Assert.assertEquals(5, d2.scale()); + HiveDecimal expected2 = HiveDecimal.create("23"); DecimalUtil.ceiling(0, d2, dcv); - Assert.assertEquals(0, expected2.compareTo(dcv.vector[0])); + Assert.assertEquals(0, expected2.compareTo(dcv.vector[0].getHiveDecimal())); - Decimal128 d3 = new Decimal128(-25.34567, (short) 5); - Decimal128 expected3 = new Decimal128(-25, (short)0); + HiveDecimal d3 = HiveDecimal.create("-25.34567"); + HiveDecimal expected3 = HiveDecimal.create("-25"); DecimalUtil.ceiling(0, d3, dcv); - Assert.assertEquals(0, expected3.compareTo(dcv.vector[0])); + Assert.assertEquals(0, expected3.compareTo(dcv.vector[0].getHiveDecimal())); - Decimal128 d4 = new Decimal128(-17, (short) 5); - Decimal128 expected4 = new Decimal128(-17, (short)0); + HiveDecimal d4 = HiveDecimal.create("-17.00000"); + Assert.assertEquals(5, d4.scale()); + HiveDecimal expected4 = HiveDecimal.create("-17"); DecimalUtil.ceiling(0, d4, dcv); - Assert.assertEquals(0, expected4.compareTo(dcv.vector[0])); + Assert.assertEquals(0, expected4.compareTo(dcv.vector[0].getHiveDecimal())); - Decimal128 d5 = new Decimal128(-0.3, (short) 5); - Decimal128 expected5 = new Decimal128(0, (short)0); + HiveDecimal d5 = HiveDecimal.create("-0.30000"); + Assert.assertEquals(5, d5.scale()); + HiveDecimal expected5 = HiveDecimal.create("0"); DecimalUtil.ceiling(0, d5, dcv); - Assert.assertEquals(0, expected5.compareTo(dcv.vector[0])); + Assert.assertEquals(0, expected5.compareTo(dcv.vector[0].getHiveDecimal())); - 
Decimal128 d6 = new Decimal128(0.3, (short) 5); - Decimal128 expected6 = new Decimal128(1, (short)0); + HiveDecimal d6 = HiveDecimal.create("0.30000"); + Assert.assertEquals(5, d6.scale()); + HiveDecimal expected6 = HiveDecimal.create("1"); DecimalUtil.ceiling(0, d6, dcv); - Assert.assertEquals(0, expected6.compareTo(dcv.vector[0])); + Assert.assertEquals(0, expected6.compareTo(dcv.vector[0].getHiveDecimal())); } @Test public void testAbs() { DecimalColumnVector dcv = new DecimalColumnVector(4 ,20, 13); - Decimal128 d1 = new Decimal128(19.56778, (short) 5); + HiveDecimal d1 = HiveDecimal.create("19.56778"); DecimalUtil.abs(0, d1, dcv); - Assert.assertEquals(0, d1.compareTo(dcv.vector[0])); + Assert.assertEquals(0, d1.compareTo(dcv.vector[0].getHiveDecimal())); - Decimal128 d2 = new Decimal128(-25.34567, (short) 5); - Decimal128 expected2 = new Decimal128(25.34567, (short)5); + HiveDecimal d2 = HiveDecimal.create("-25.34567"); + HiveDecimal expected2 = HiveDecimal.create("25.34567"); DecimalUtil.abs(0, d2, dcv); - Assert.assertEquals(0, expected2.compareTo(dcv.vector[0])); + Assert.assertEquals(0, expected2.compareTo(dcv.vector[0].getHiveDecimal())); } @Test public void testRound() { DecimalColumnVector dcv = new DecimalColumnVector(4 ,20, 0); - Decimal128 d1 = new Decimal128(19.56778, (short) 5); - Decimal128 expected1 = new Decimal128(20, (short)0); + HiveDecimal d1 = HiveDecimal.create("19.56778"); + HiveDecimal expected1 = HiveDecimal.create("20"); DecimalUtil.round(0, d1, dcv); - Assert.assertEquals(0, expected1.compareTo(dcv.vector[0])); + Assert.assertEquals(0, expected1.compareTo(dcv.vector[0].getHiveDecimal())); - Decimal128 d2 = new Decimal128(23.0, (short) 5); - Decimal128 expected2 = new Decimal128(23, (short)0); + HiveDecimal d2 = HiveDecimal.create("23.00000"); + Assert.assertEquals(5, d2.scale()); + HiveDecimal expected2 = HiveDecimal.create("23"); DecimalUtil.round(0, d2, dcv); - Assert.assertEquals(0, expected2.compareTo(dcv.vector[0])); + Assert.assertEquals(0, expected2.compareTo(dcv.vector[0].getHiveDecimal())); - Decimal128 d3 = new Decimal128(-25.34567, (short) 5); - Decimal128 expected3 = new Decimal128(-25, (short)0); + HiveDecimal d3 = HiveDecimal.create("-25.34567"); + HiveDecimal expected3 = HiveDecimal.create("-25"); DecimalUtil.round(0, d3, dcv); - Assert.assertEquals(0, expected3.compareTo(dcv.vector[0])); + Assert.assertEquals(0, expected3.compareTo(dcv.vector[0].getHiveDecimal())); - Decimal128 d4 = new Decimal128(-17, (short) 5); - Decimal128 expected4 = new Decimal128(-17, (short)0); + HiveDecimal d4 = HiveDecimal.create("-17.00000"); + Assert.assertEquals(5, d4.scale()); + HiveDecimal expected4 = HiveDecimal.create("-17"); DecimalUtil.round(0, d4, dcv); - Assert.assertEquals(0, expected4.compareTo(dcv.vector[0])); + Assert.assertEquals(0, expected4.compareTo(dcv.vector[0].getHiveDecimal())); - Decimal128 d5 = new Decimal128(19.36778, (short) 5); - Decimal128 expected5 = new Decimal128(19, (short)0); + HiveDecimal d5 = HiveDecimal.create("19.36778"); + HiveDecimal expected5 = HiveDecimal.create("19"); DecimalUtil.round(0, d5, dcv); - Assert.assertEquals(0, expected5.compareTo(dcv.vector[0])); + Assert.assertEquals(0, expected5.compareTo(dcv.vector[0].getHiveDecimal())); - Decimal128 d6 = new Decimal128(-25.54567, (short) 5); - Decimal128 expected6 = new Decimal128(-26, (short)0); + HiveDecimal d6 = HiveDecimal.create("-25.54567"); + HiveDecimal expected6 = HiveDecimal.create("-26"); DecimalUtil.round(0, d6, dcv); - Assert.assertEquals(0, 
expected6.compareTo(dcv.vector[0])); + Assert.assertEquals(0, expected6.compareTo(dcv.vector[0].getHiveDecimal())); } @Test public void testRoundWithDigits() { DecimalColumnVector dcv = new DecimalColumnVector(4 ,20, 3); - Decimal128 d1 = new Decimal128(19.56778, (short) 5); - Decimal128 expected1 = new Decimal128(19.568, (short)3); + HiveDecimal d1 = HiveDecimal.create("19.56778"); + HiveDecimal expected1 = HiveDecimal.create("19.568"); DecimalUtil.round(0, d1, dcv); - Assert.assertEquals(0, expected1.compareTo(dcv.vector[0])); + Assert.assertEquals(0, expected1.compareTo(dcv.vector[0].getHiveDecimal())); - Decimal128 d2 = new Decimal128(23.567, (short) 5); - Decimal128 expected2 = new Decimal128(23.567, (short)3); + HiveDecimal d2 = HiveDecimal.create("23.56700"); + Assert.assertEquals(5, d2.scale()); + HiveDecimal expected2 = HiveDecimal.create("23.567"); DecimalUtil.round(0, d2, dcv); - Assert.assertEquals(0, expected2.compareTo(dcv.vector[0])); + Assert.assertEquals(0, expected2.compareTo(dcv.vector[0].getHiveDecimal())); - Decimal128 d3 = new Decimal128(-25.34567, (short) 5); - Decimal128 expected3 = new Decimal128(-25.346, (short)3); + HiveDecimal d3 = HiveDecimal.create("-25.34567"); + HiveDecimal expected3 = HiveDecimal.create("-25.346"); DecimalUtil.round(0, d3, dcv); - Assert.assertEquals(0, expected3.compareTo(dcv.vector[0])); + Assert.assertEquals(0, expected3.compareTo(dcv.vector[0].getHiveDecimal())); - Decimal128 d4 = new Decimal128(-17.234, (short) 5); - Decimal128 expected4 = new Decimal128(-17.234, (short)3); + HiveDecimal d4 = HiveDecimal.create("-17.23400"); + Assert.assertEquals(5, d4.scale()); + HiveDecimal expected4 = HiveDecimal.create("-17.234"); DecimalUtil.round(0, d4, dcv); - Assert.assertEquals(0, expected4.compareTo(dcv.vector[0])); + Assert.assertEquals(0, expected4.compareTo(dcv.vector[0].getHiveDecimal())); - Decimal128 d5 = new Decimal128(19.36748, (short) 5); - Decimal128 expected5 = new Decimal128(19.367, (short)3); + HiveDecimal d5 = HiveDecimal.create("19.36748"); + HiveDecimal expected5 = HiveDecimal.create("19.367"); DecimalUtil.round(0, d5, dcv); - Assert.assertEquals(0, expected5.compareTo(dcv.vector[0])); + Assert.assertEquals(0, expected5.compareTo(dcv.vector[0].getHiveDecimal())); - Decimal128 d6 = new Decimal128(-25.54537, (short) 5); - Decimal128 expected6 = new Decimal128(-25.545, (short)3); + HiveDecimal d6 = HiveDecimal.create("-25.54537"); + HiveDecimal expected6 = HiveDecimal.create("-25.545"); DecimalUtil.round(0, d6, dcv); - Assert.assertEquals(0, expected6.compareTo(dcv.vector[0])); + Assert.assertEquals(0, expected6.compareTo(dcv.vector[0].getHiveDecimal())); } @Test public void testNegate() { DecimalColumnVector dcv = new DecimalColumnVector(4 ,20, 13); - Decimal128 d1 = new Decimal128(19.56778, (short) 5); - Decimal128 expected1 = new Decimal128(-19.56778, (short)5); + HiveDecimal d1 = HiveDecimal.create("19.56778"); + HiveDecimal expected1 = HiveDecimal.create("-19.56778"); DecimalUtil.negate(0, d1, dcv); - Assert.assertEquals(0, expected1.compareTo(dcv.vector[0])); + Assert.assertEquals(0, expected1.compareTo(dcv.vector[0].getHiveDecimal())); - Decimal128 d2 = new Decimal128(-25.34567, (short) 5); - Decimal128 expected2 = new Decimal128(25.34567, (short)5); + HiveDecimal d2 = HiveDecimal.create("-25.34567"); + HiveDecimal expected2 = HiveDecimal.create("25.34567"); DecimalUtil.negate(0, d2, dcv); - Assert.assertEquals(0, expected2.compareTo(dcv.vector[0])); + Assert.assertEquals(0, 
expected2.compareTo(dcv.vector[0].getHiveDecimal())); - Decimal128 d3 = new Decimal128(0, (short) 5); - Decimal128 expected3 = new Decimal128(0, (short)0); + HiveDecimal d3 = HiveDecimal.create("0.00000"); + Assert.assertEquals(5, d3.scale()); + HiveDecimal expected3 = HiveDecimal.create("0"); DecimalUtil.negate(0, d3, dcv); - Assert.assertEquals(0, expected3.compareTo(dcv.vector[0])); + Assert.assertEquals(0, expected3.compareTo(dcv.vector[0].getHiveDecimal())); } @Test public void testSign() { LongColumnVector lcv = new LongColumnVector(4); - Decimal128 d1 = new Decimal128(19.56778, (short) 5); + HiveDecimal d1 = HiveDecimal.create("19.56778"); DecimalUtil.sign(0, d1, lcv); Assert.assertEquals(1, lcv.vector[0]); - Decimal128 d2 = new Decimal128(-25.34567, (short) 5); + HiveDecimal d2 = HiveDecimal.create("-25.34567"); DecimalUtil.sign(0, d2, lcv); Assert.assertEquals(-1, lcv.vector[0]); - Decimal128 d3 = new Decimal128(0, (short) 5); + HiveDecimal d3 = HiveDecimal.create("0.00000"); + Assert.assertEquals(5, d3.scale()); + d3.setScale(5); DecimalUtil.sign(0, d3, lcv); Assert.assertEquals(0, lcv.vector[0]); } Index: ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorArithmeticExpressions.java =================================================================== --- ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorArithmeticExpressions.java (revision 1637277) +++ ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorArithmeticExpressions.java (working copy) @@ -23,7 +23,7 @@ import static org.junit.Assert.assertTrue; import junit.framework.Assert; -import org.apache.hadoop.hive.common.type.Decimal128; +import org.apache.hadoop.hive.common.type.HiveDecimal; import org.apache.hadoop.hive.ql.exec.vector.DecimalColumnVector; import org.apache.hadoop.hive.ql.exec.vector.DoubleColumnVector; import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector; @@ -315,9 +315,9 @@ // test without nulls expr.evaluate(b); - assertTrue(r.vector[0].equals(new Decimal128("2.20", (short) 2))); - assertTrue(r.vector[1].equals(new Decimal128("-2.30", (short) 2))); - assertTrue(r.vector[2].equals(new Decimal128("1.00", (short) 2))); + assertTrue(r.vector[0].getHiveDecimal().equals(HiveDecimal.create("2.20"))); + assertTrue(r.vector[1].getHiveDecimal().equals(HiveDecimal.create("-2.30"))); + assertTrue(r.vector[2].getHiveDecimal().equals(HiveDecimal.create("1.00"))); // test nulls propagation b = getVectorizedRowBatch3DecimalCols(); @@ -330,18 +330,18 @@ // Verify null output data entry is not 0, but rather the value specified by design, // which is the minimum non-0 value, 0.01 in this case. 
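// Editorial sketch (not part of the patch): the hunks in these test files swap the
// mutable Decimal128 API for the immutable HiveDecimal one. A minimal illustration,
// assuming only the calls already visible in this diff:
//   Decimal128 old = new Decimal128("2.20", (short) 2);  // old: value plus explicit scale
//   old.update("4.75", (short) 2);                       // old: mutated in place
//   HiveDecimal d = HiveDecimal.create("2.20");          // new: immutable value object
//   d = d.setScale(2);                                   // new: setScale returns a fresh value
//   dcv.vector[0].set(d);                                // new: entries are HiveDecimalWritable
//   HiveDecimal read = dcv.vector[0].getHiveDecimal();   // new: read back for assertions
// Because setScale returns a new object, the bare "d3.setScale(5);" in the testSign
// hunk above discards its result and appears to be a no-op.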
- assertTrue(r.vector[0].equals(new Decimal128("0.01", (short) 2))); + assertTrue(r.vector[0].getHiveDecimal().equals(HiveDecimal.create("0.01"))); // test that overflow produces NULL b = getVectorizedRowBatch3DecimalCols(); c0 = (DecimalColumnVector) b.cols[0]; - c0.vector[0].update("9999999999999999.99", (short) 2); // set to max possible value + c0.vector[0].set(HiveDecimal.create("9999999999999999.99")); // set to max possible value r = (DecimalColumnVector) b.cols[2]; expr.evaluate(b); // will cause overflow for result at position 0, must yield NULL assertTrue(!r.noNulls && r.isNull[0]); // verify proper null output data value - assertTrue(r.vector[0].equals(new Decimal128("0.01", (short) 2))); + assertTrue(r.vector[0].getHiveDecimal().equals(HiveDecimal.create("0.01"))); // test left input repeating b = getVectorizedRowBatch3DecimalCols(); @@ -349,25 +349,25 @@ c0.isRepeating = true; r = (DecimalColumnVector) b.cols[2]; expr.evaluate(b); - assertTrue(r.vector[0].equals(new Decimal128("2.20", (short) 2))); - assertTrue(r.vector[1].equals(new Decimal128("2.20", (short) 2))); - assertTrue(r.vector[2].equals(new Decimal128("2.20", (short) 2))); + assertTrue(r.vector[0].getHiveDecimal().equals(HiveDecimal.create("2.20"))); + assertTrue(r.vector[1].getHiveDecimal().equals(HiveDecimal.create("2.20"))); + assertTrue(r.vector[2].getHiveDecimal().equals(HiveDecimal.create("2.20"))); // test both inputs repeating DecimalColumnVector c1 = (DecimalColumnVector) b.cols[1]; c1.isRepeating = true; expr.evaluate(b); assertTrue(r.isRepeating); - assertTrue(r.vector[0].equals(new Decimal128("2.20", (short) 2))); + assertTrue(r.vector[0].getHiveDecimal().equals(HiveDecimal.create("2.20"))); // test right input repeating b = getVectorizedRowBatch3DecimalCols(); c1 = (DecimalColumnVector) b.cols[1]; c1.isRepeating = true; - c1.vector[0].update("2", (short) 2); + c1.vector[0].set(HiveDecimal.create("2.00")); r = (DecimalColumnVector) b.cols[2]; expr.evaluate(b); - assertTrue(r.vector[2].equals(new Decimal128("2", (short) 2))); + assertTrue(r.vector[2].getHiveDecimal().equals(HiveDecimal.create("2.00"))); } // Spot check decimal column-column subtract @@ -379,14 +379,14 @@ // test without nulls expr.evaluate(b); - assertTrue(r.vector[0].equals(new Decimal128("0.20", (short) 2))); - assertTrue(r.vector[1].equals(new Decimal128("-4.30", (short) 2))); - assertTrue(r.vector[2].equals(new Decimal128("-1.00", (short) 2))); + assertTrue(r.vector[0].getHiveDecimal().equals(HiveDecimal.create("0.20"))); + assertTrue(r.vector[1].getHiveDecimal().equals(HiveDecimal.create("-4.30"))); + assertTrue(r.vector[2].getHiveDecimal().equals(HiveDecimal.create("-1.00"))); // test that underflow produces NULL b = getVectorizedRowBatch3DecimalCols(); DecimalColumnVector c0 = (DecimalColumnVector) b.cols[0]; - c0.vector[0].update("-9999999999999999.99", (short) 2); // set to min possible value + c0.vector[0].set(HiveDecimal.create("-9999999999999999.99")); // set to min possible value r = (DecimalColumnVector) b.cols[2]; expr.evaluate(b); // will cause underflow for result at position 0, must yield NULL assertTrue(!r.noNulls && r.isNull[0]); @@ -401,16 +401,16 @@ // test without nulls expr.evaluate(b); - assertTrue(r.vector[0].equals(new Decimal128("1.20", (short) 2))); - assertTrue(r.vector[1].equals(new Decimal128("-3.30", (short) 2))); - assertTrue(r.vector[2].equals(new Decimal128("0.00", (short) 2))); + assertTrue(r.vector[0].getHiveDecimal().equals(HiveDecimal.create("1.20"))); + 
assertTrue(r.vector[1].getHiveDecimal().equals(HiveDecimal.create("-3.30"))); + assertTrue(r.vector[2].getHiveDecimal().equals(HiveDecimal.create("0.00"))); // test that underflow produces NULL b = getVectorizedRowBatch3DecimalCols(); DecimalColumnVector c0 = (DecimalColumnVector) b.cols[0]; - c0.vector[0].update("9999999999999999.99", (short) 2); // set to max possible value + c0.vector[0].set(HiveDecimal.create("9999999999999999.99")); // set to max possible value DecimalColumnVector c1 = (DecimalColumnVector) b.cols[1]; - c1.vector[0].update("2", (short) 2); + c1.vector[0].set(HiveDecimal.create("2.00")); r = (DecimalColumnVector) b.cols[2]; expr.evaluate(b); // will cause overflow for result at position 0, must yield NULL assertTrue(!r.noNulls && r.isNull[0]); @@ -422,15 +422,15 @@ @Test public void testDecimalColAddDecimalScalar() { VectorizedRowBatch b = getVectorizedRowBatch3DecimalCols(); - Decimal128 d = new Decimal128(1); + HiveDecimal d = HiveDecimal.create(1); VectorExpression expr = new DecimalColAddDecimalScalar(0, d, 2); // test without nulls expr.evaluate(b); DecimalColumnVector r = (DecimalColumnVector) b.cols[2]; - assertTrue(r.vector[0].equals(new Decimal128("2.20", (short) 2))); - assertTrue(r.vector[1].equals(new Decimal128("-2.30", (short) 2))); - assertTrue(r.vector[2].equals(new Decimal128("1.00", (short) 2))); + assertTrue(r.vector[0].getHiveDecimal().equals(HiveDecimal.create("2.20"))); + assertTrue(r.vector[1].getHiveDecimal().equals(HiveDecimal.create("-2.30"))); + assertTrue(r.vector[2].getHiveDecimal().equals(HiveDecimal.create("1"))); // test null propagation b = getVectorizedRowBatch3DecimalCols(); @@ -449,7 +449,7 @@ expr.evaluate(b); r = (DecimalColumnVector) b.cols[2]; assertTrue(r.isRepeating); - assertTrue(r.vector[0].equals(new Decimal128("2.20", (short) 2))); + assertTrue(r.vector[0].getHiveDecimal().equals(HiveDecimal.create("2.20"))); // test repeating case for null value b = getVectorizedRowBatch3DecimalCols(); @@ -466,7 +466,7 @@ // test that overflow produces null b = getVectorizedRowBatch3DecimalCols(); in = (DecimalColumnVector) b.cols[0]; - in.vector[0].update("9999999999999999.99", (short) 2); // set to max possible value + in.vector[0].set(HiveDecimal.create("9999999999999999.99")); // set to max possible value expr.evaluate(b); r = (DecimalColumnVector) b.cols[2]; assertFalse(r.noNulls); @@ -480,16 +480,16 @@ @Test public void testDecimalColDivideDecimalScalar() { VectorizedRowBatch b = getVectorizedRowBatch3DecimalCols(); - Decimal128 d = new Decimal128("2.00", (short) 2); + HiveDecimal d = HiveDecimal.create("2.00"); VectorExpression expr = new DecimalColDivideDecimalScalar(0, d, 2); // test without nulls expr.evaluate(b); DecimalColumnVector r = (DecimalColumnVector) b.cols[2]; - assertTrue(r.vector[0].equals(new Decimal128("0.60", (short) 2))); - assertTrue(r.vector[1].equals(new Decimal128("-1.65", (short) 2))); - assertTrue(r.vector[2].equals(new Decimal128("0", (short) 2))); + assertTrue(r.vector[0].getHiveDecimal().equals(HiveDecimal.create("0.6"))); + assertTrue(r.vector[1].getHiveDecimal().equals(HiveDecimal.create("-1.65"))); + assertTrue(r.vector[2].getHiveDecimal().equals(HiveDecimal.create("0"))); // test null propagation b = getVectorizedRowBatch3DecimalCols(); @@ -508,7 +508,7 @@ expr.evaluate(b); r = (DecimalColumnVector) b.cols[2]; assertTrue(r.isRepeating); - assertTrue(r.vector[0].equals(new Decimal128("0.60", (short) 2))); + assertTrue(r.vector[0].getHiveDecimal().equals(HiveDecimal.create("0.6"))); // test repeating 
case for null value b = getVectorizedRowBatch3DecimalCols(); @@ -525,7 +525,7 @@ // test that zero-divide produces null for all output values b = getVectorizedRowBatch3DecimalCols(); in = (DecimalColumnVector) b.cols[0]; - expr = new DecimalColDivideDecimalScalar(0, new Decimal128("0", (short) 2), 2); + expr = new DecimalColDivideDecimalScalar(0, HiveDecimal.create("0"), 2); expr.evaluate(b); r = (DecimalColumnVector) b.cols[2]; assertFalse(r.noNulls); @@ -539,14 +539,14 @@ @Test public void testDecimalScalarDivideDecimalColumn() { VectorizedRowBatch b = getVectorizedRowBatch3DecimalCols(); - Decimal128 d = new Decimal128("3.96", (short) 2); // 1.20 * 3.30 + HiveDecimal d = HiveDecimal.create("3.96"); // 1.20 * 3.30 VectorExpression expr = new DecimalScalarDivideDecimalColumn(d, 0, 2); // test without nulls expr.evaluate(b); DecimalColumnVector r = (DecimalColumnVector) b.cols[2]; - assertTrue(r.vector[0].equals(new Decimal128("3.30", (short) 2))); - assertTrue(r.vector[1].equals(new Decimal128("-1.20", (short) 2))); + assertTrue(r.vector[0].getHiveDecimal().equals(HiveDecimal.create("3.3"))); + assertTrue(r.vector[1].getHiveDecimal().equals(HiveDecimal.create("-1.2"))); assertFalse(r.noNulls); // entry 2 is null due to zero-divide assertTrue(r.isNull[2]); @@ -567,7 +567,7 @@ expr.evaluate(b); r = (DecimalColumnVector) b.cols[2]; assertTrue(r.isRepeating); - assertTrue(r.vector[0].equals(new Decimal128("3.30", (short) 2))); + assertTrue(r.vector[0].getHiveDecimal().equals(HiveDecimal.create("3.3"))); // test repeating case for null value b = getVectorizedRowBatch3DecimalCols(); @@ -586,30 +586,32 @@ @Test public void testDecimalColModuloDecimalScalar() { VectorizedRowBatch b = getVectorizedRowBatch3DecimalCols(); - Decimal128 d = new Decimal128("2.00", (short) 2); + HiveDecimal d = HiveDecimal.create("2.00"); VectorExpression expr = new DecimalColModuloDecimalScalar(0, d, 2); // test without nulls expr.evaluate(b); DecimalColumnVector r = (DecimalColumnVector) b.cols[2]; - assertTrue(r.vector[0].equals(new Decimal128("1.20", (short) 2))); - assertTrue(r.vector[1].equals(new Decimal128("-1.30", (short) 2))); - assertTrue(r.vector[2].equals(new Decimal128("0", (short) 2))); + assertTrue(r.vector[0].getHiveDecimal().equals(HiveDecimal.create("1.20"))); + assertTrue(r.vector[1].getHiveDecimal().equals(HiveDecimal.create("-1.30"))); + assertTrue(r.vector[2].getHiveDecimal().equals(HiveDecimal.create("0"))); // try again with some different data values and divisor DecimalColumnVector in = (DecimalColumnVector) b.cols[0]; - in.vector[0].update("15.40", (short) 2); - in.vector[1].update("-17.20", (short) 2); - in.vector[2].update("70.00", (short) 2); - d.update("4.75", (short) 2); + in.vector[0].set(HiveDecimal.create("15.40")); + in.vector[1].set(HiveDecimal.create("-17.20")); + in.vector[2].set(HiveDecimal.create("70.00")); + d = HiveDecimal.create("4.75"); + expr = new DecimalColModuloDecimalScalar(0, d, 2); expr.evaluate(b); - assertTrue(r.vector[0].equals(new Decimal128("1.15", (short) 2))); - assertTrue(r.vector[1].equals(new Decimal128("-2.95", (short) 2))); - assertTrue(r.vector[2].equals(new Decimal128("3.50", (short) 2))); + assertTrue(r.vector[0].getHiveDecimal().equals(HiveDecimal.create("1.15"))); + assertTrue(r.vector[1].getHiveDecimal().equals(HiveDecimal.create("-2.95"))); + assertTrue(r.vector[2].getHiveDecimal().equals(HiveDecimal.create("3.50"))); // try a zero-divide to show a repeating NULL is produced - d.update("0", (short) 2); + d = HiveDecimal.create("0.00"); + expr = new 
DecimalColModuloDecimalScalar(0, d, 2); expr.evaluate(b); assertFalse(r.noNulls); assertTrue(r.isNull[0]); @@ -620,27 +622,28 @@ @Test public void testDecimalScalarModuloDecimalColumn() { VectorizedRowBatch b = getVectorizedRowBatch3DecimalCols(); - Decimal128 d = new Decimal128("2.00", (short) 2); + HiveDecimal d = HiveDecimal.create("2.00"); VectorExpression expr = new DecimalScalarModuloDecimalColumn(d, 0, 2); // test without nulls expr.evaluate(b); DecimalColumnVector r = (DecimalColumnVector) b.cols[2]; - assertTrue(r.vector[0].equals(new Decimal128("0.80", (short) 2))); - assertTrue(r.vector[1].equals(new Decimal128("2.00", (short) 2))); + assertTrue(r.vector[0].getHiveDecimal().equals(HiveDecimal.create("0.80"))); + assertTrue(r.vector[1].getHiveDecimal().equals(HiveDecimal.create("2.00"))); assertFalse(r.noNulls); // entry 2 will be null due to zero-divide assertTrue(r.isNull[2]); // try again with some different data values DecimalColumnVector in = (DecimalColumnVector) b.cols[0]; - in.vector[0].update("0.50", (short) 2); - in.vector[1].update("0.80", (short) 2); - in.vector[2].update("0.70", (short) 2); + expr = new DecimalScalarModuloDecimalColumn(d, 0, 2); + in.vector[0].set(HiveDecimal.create("0.50")); + in.vector[1].set(HiveDecimal.create("0.80")); + in.vector[2].set(HiveDecimal.create("0.70")); expr.evaluate(b); - assertTrue(r.vector[0].equals(new Decimal128("0.00", (short) 2))); - assertTrue(r.vector[1].equals(new Decimal128("0.40", (short) 2))); - assertTrue(r.vector[2].equals(new Decimal128("0.60", (short) 2))); + assertTrue(r.vector[0].getHiveDecimal().equals(HiveDecimal.create("0.00"))); + assertTrue(r.vector[1].getHiveDecimal().equals(HiveDecimal.create("0.40"))); + assertTrue(r.vector[2].getHiveDecimal().equals(HiveDecimal.create("0.60"))); } @Test @@ -648,16 +651,16 @@ VectorizedRowBatch b = getVectorizedRowBatch3DecimalCols(); DecimalColumnVector in1 = (DecimalColumnVector) b.cols[1]; for (int i = 0; i < 3; i++) { - in1.vector[i] = new Decimal128("0.50", (short) 2); + in1.vector[i].set(HiveDecimal.create("0.50")); } VectorExpression expr = new DecimalColDivideDecimalColumn(0, 1, 2); expr.evaluate(b); DecimalColumnVector r = (DecimalColumnVector) b.cols[2]; // all divides are by 0.50 so the result column is 2 times col 0. 
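// Editorial note: HiveDecimal is immutable, so where the old code mutated the scalar
// in place (d.update("4.75", (short) 2)) and the already-built expression saw the new
// value, the patch now rebinds d and must also rebuild the expression that captured
// the old value; hence the added "expr = new DecimalColModuloDecimalScalar(0, d, 2);"
// lines in the modulo tests above. Sketch:
//   HiveDecimal d = HiveDecimal.create("2.00");
//   VectorExpression expr = new DecimalColModuloDecimalScalar(0, d, 2); // captures 2.00
//   d = HiveDecimal.create("4.75");                    // rebinding d does not affect expr
//   expr = new DecimalColModuloDecimalScalar(0, d, 2); // so a new expression is built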
- assertTrue(r.vector[0].equals(new Decimal128("2.40", (short) 2))); - assertTrue(r.vector[1].equals(new Decimal128("-6.60", (short) 2))); - assertTrue(r.vector[2].equals(new Decimal128("0", (short) 2))); + assertTrue(r.vector[0].getHiveDecimal().equals(HiveDecimal.create("2.4"))); + assertTrue(r.vector[1].getHiveDecimal().equals(HiveDecimal.create("-6.6"))); + assertTrue(r.vector[2].getHiveDecimal().equals(HiveDecimal.create("0"))); // test null on left b.cols[0].noNulls = false; @@ -692,14 +695,14 @@ b.cols[0].isRepeating = true; expr.evaluate(b); r = (DecimalColumnVector) b.cols[2]; - assertTrue(r.vector[2].equals(new Decimal128("1.20", (short) 2))); + assertTrue(r.vector[2].getHiveDecimal().equals(HiveDecimal.create("1.2"))); // test repeating on right b = getVectorizedRowBatch3DecimalCols(); b.cols[1].isRepeating = true; expr.evaluate(b); r = (DecimalColumnVector) b.cols[2]; - assertTrue(r.vector[2].equals(new Decimal128("0", (short) 2))); + assertTrue(r.vector[2].getHiveDecimal().equals(HiveDecimal.create("0"))); // test both repeating b = getVectorizedRowBatch3DecimalCols(); @@ -708,11 +711,11 @@ expr.evaluate(b); r = (DecimalColumnVector) b.cols[2]; assertTrue(r.isRepeating); - assertTrue(r.vector[0].equals(new Decimal128("1.20", (short) 2))); + assertTrue(r.vector[0].getHiveDecimal().equals(HiveDecimal.create("1.2"))); // test zero-divide to show it results in NULL b = getVectorizedRowBatch3DecimalCols(); - ((DecimalColumnVector) b.cols[1]).vector[0].update(0); + ((DecimalColumnVector) b.cols[1]).vector[0].set(HiveDecimal.create("0")); expr.evaluate(b); r = (DecimalColumnVector) b.cols[2]; assertFalse(r.noNulls); @@ -725,15 +728,15 @@ VectorizedRowBatch b = getVectorizedRowBatch3DecimalCols(); DecimalColumnVector in1 = (DecimalColumnVector) b.cols[1]; for (int i = 0; i < 3; i++) { - in1.vector[i] = new Decimal128("0.50", (short) 2); + in1.vector[i].set(HiveDecimal.create("0.50")); } VectorExpression expr = new DecimalColModuloDecimalColumn(0, 1, 2); expr.evaluate(b); DecimalColumnVector r = (DecimalColumnVector) b.cols[2]; - assertTrue(r.vector[0].equals(new Decimal128("0.20", (short) 2))); - assertTrue(r.vector[1].equals(new Decimal128("-0.30", (short) 2))); - assertTrue(r.vector[2].equals(new Decimal128("0", (short) 2))); + assertTrue(r.vector[0].getHiveDecimal().equals(HiveDecimal.create("0.20"))); + assertTrue(r.vector[1].getHiveDecimal().equals(HiveDecimal.create("-0.30"))); + assertTrue(r.vector[2].getHiveDecimal().equals(HiveDecimal.create("0"))); } /* Spot check correctness of decimal column subtract decimal scalar. 
The case for @@ -742,20 +745,20 @@ @Test public void testDecimalColSubtractDecimalScalar() { VectorizedRowBatch b = getVectorizedRowBatch3DecimalCols(); - Decimal128 d = new Decimal128(1); + HiveDecimal d = HiveDecimal.create(1); VectorExpression expr = new DecimalColSubtractDecimalScalar(0, d, 2); // test without nulls expr.evaluate(b); DecimalColumnVector r = (DecimalColumnVector) b.cols[2]; - assertTrue(r.vector[0].equals(new Decimal128("0.20", (short) 2))); - assertTrue(r.vector[1].equals(new Decimal128("-4.30", (short) 2))); - assertTrue(r.vector[2].equals(new Decimal128("-1.00", (short) 2))); + assertTrue(r.vector[0].getHiveDecimal().equals(HiveDecimal.create("0.20"))); + assertTrue(r.vector[1].getHiveDecimal().equals(HiveDecimal.create("-4.30"))); + assertTrue(r.vector[2].getHiveDecimal().equals(HiveDecimal.create("-1"))); // test that underflow produces null b = getVectorizedRowBatch3DecimalCols(); DecimalColumnVector in = (DecimalColumnVector) b.cols[0]; - in.vector[0].update("-9999999999999999.99", (short) 2); // set to min possible value + in.vector[0].set(HiveDecimal.create("-9999999999999999.99")); // set to min possible value expr.evaluate(b); r = (DecimalColumnVector) b.cols[2]; assertFalse(r.noNulls); @@ -768,20 +771,20 @@ @Test public void testDecimalColMultiplyDecimalScalar() { VectorizedRowBatch b = getVectorizedRowBatch3DecimalCols(); - Decimal128 d = new Decimal128(2); + HiveDecimal d = HiveDecimal.create(2); VectorExpression expr = new DecimalColMultiplyDecimalScalar(0, d, 2); // test without nulls expr.evaluate(b); DecimalColumnVector r = (DecimalColumnVector) b.cols[2]; - assertTrue(r.vector[0].equals(new Decimal128("2.40", (short) 2))); - assertTrue(r.vector[1].equals(new Decimal128("-6.60", (short) 2))); - assertTrue(r.vector[2].equals(new Decimal128("0", (short) 2))); + assertTrue(r.vector[0].getHiveDecimal().equals(HiveDecimal.create("2.40"))); + assertTrue(r.vector[1].getHiveDecimal().equals(HiveDecimal.create("-6.60"))); + assertTrue(r.vector[2].getHiveDecimal().equals(HiveDecimal.create("0"))); // test that overflow produces null b = getVectorizedRowBatch3DecimalCols(); DecimalColumnVector in = (DecimalColumnVector) b.cols[0]; - in.vector[0].update("9999999999999999.99", (short) 2); // set to max possible value + in.vector[0].set(HiveDecimal.create("9999999999999999.99")); // set to max possible value expr.evaluate(b); r = (DecimalColumnVector) b.cols[2]; assertFalse(r.noNulls); @@ -794,15 +797,15 @@ @Test public void testDecimalScalarAddDecimalColumn() { VectorizedRowBatch b = getVectorizedRowBatch3DecimalCols(); - Decimal128 d = new Decimal128(1); + HiveDecimal d = HiveDecimal.create(1); VectorExpression expr = new DecimalScalarAddDecimalColumn(d, 0, 2); // test without nulls expr.evaluate(b); DecimalColumnVector r = (DecimalColumnVector) b.cols[2]; - assertTrue(r.vector[0].equals(new Decimal128("2.20", (short) 2))); - assertTrue(r.vector[1].equals(new Decimal128("-2.30", (short) 2))); - assertTrue(r.vector[2].equals(new Decimal128("1.00", (short) 2))); + assertTrue(r.vector[0].getHiveDecimal().equals(HiveDecimal.create("2.20"))); + assertTrue(r.vector[1].getHiveDecimal().equals(HiveDecimal.create("-2.30"))); + assertTrue(r.vector[2].getHiveDecimal().equals(HiveDecimal.create("1"))); // test null propagation b = getVectorizedRowBatch3DecimalCols(); @@ -821,7 +824,7 @@ expr.evaluate(b); r = (DecimalColumnVector) b.cols[2]; assertTrue(r.isRepeating); - assertTrue(r.vector[0].equals(new Decimal128("2.20", (short) 2))); + 
assertTrue(r.vector[0].getHiveDecimal().equals(HiveDecimal.create("2.20"))); // test repeating case for null value b = getVectorizedRowBatch3DecimalCols(); @@ -838,7 +841,7 @@ // test that overflow produces null b = getVectorizedRowBatch3DecimalCols(); in = (DecimalColumnVector) b.cols[0]; - in.vector[0].update("9999999999999999.99", (short) 2); // set to max possible value + in.vector[0].set(HiveDecimal.create("9999999999999999.99")); // set to max possible value expr.evaluate(b); r = (DecimalColumnVector) b.cols[2]; assertFalse(r.noNulls); @@ -851,20 +854,20 @@ @Test public void testDecimalScalarSubtractDecimalColumn() { VectorizedRowBatch b = getVectorizedRowBatch3DecimalCols(); - Decimal128 d = new Decimal128(1); + HiveDecimal d = HiveDecimal.create(1); VectorExpression expr = new DecimalScalarSubtractDecimalColumn(d, 0, 2); // test without nulls expr.evaluate(b); DecimalColumnVector r = (DecimalColumnVector) b.cols[2]; - assertTrue(r.vector[0].equals(new Decimal128("-0.20", (short) 2))); - assertTrue(r.vector[1].equals(new Decimal128("4.30", (short) 2))); - assertTrue(r.vector[2].equals(new Decimal128("1.00", (short) 2))); + assertTrue(r.vector[0].getHiveDecimal().equals(HiveDecimal.create("-0.20"))); + assertTrue(r.vector[1].getHiveDecimal().equals(HiveDecimal.create("4.30"))); + assertTrue(r.vector[2].getHiveDecimal().equals(HiveDecimal.create("1"))); // test that overflow produces null b = getVectorizedRowBatch3DecimalCols(); DecimalColumnVector in = (DecimalColumnVector) b.cols[0]; - in.vector[0].update("-9999999999999999.99", (short) 2); // set to min possible value + in.vector[0].set(HiveDecimal.create("-9999999999999999.99")); // set to min possible value expr.evaluate(b); r = (DecimalColumnVector) b.cols[2]; assertFalse(r.noNulls); @@ -878,20 +881,20 @@ @Test public void testDecimalScalarMultiplyDecimalColumn() { VectorizedRowBatch b = getVectorizedRowBatch3DecimalCols(); - Decimal128 d = new Decimal128(2); + HiveDecimal d = HiveDecimal.create(2); VectorExpression expr = new DecimalScalarMultiplyDecimalColumn(d, 0, 2); // test without nulls expr.evaluate(b); DecimalColumnVector r = (DecimalColumnVector) b.cols[2]; - assertTrue(r.vector[0].equals(new Decimal128("2.40", (short) 2))); - assertTrue(r.vector[1].equals(new Decimal128("-6.60", (short) 2))); - assertTrue(r.vector[2].equals(new Decimal128("0", (short) 2))); + assertTrue(r.vector[0].getHiveDecimal().equals(HiveDecimal.create("2.40"))); + assertTrue(r.vector[1].getHiveDecimal().equals(HiveDecimal.create("-6.60"))); + assertTrue(r.vector[2].getHiveDecimal().equals(HiveDecimal.create("0"))); // test that overflow produces null b = getVectorizedRowBatch3DecimalCols(); DecimalColumnVector in = (DecimalColumnVector) b.cols[0]; - in.vector[0].update("9999999999999999.99", (short) 2); // set to max possible value + in.vector[0].set(HiveDecimal.create("9999999999999999.99")); // set to max possible value expr.evaluate(b); r = (DecimalColumnVector) b.cols[2]; assertFalse(r.noNulls); @@ -905,13 +908,13 @@ b.cols[0] = v0 = new DecimalColumnVector(18, 2); b.cols[1] = v1 = new DecimalColumnVector(18, 2); b.cols[2] = new DecimalColumnVector(18, 2); - v0.vector[0].update("1.20", (short) 2); - v0.vector[1].update("-3.30", (short) 2); - v0.vector[2].update("0", (short) 2); + v0.vector[0].set(HiveDecimal.create("1.20")); + v0.vector[1].set(HiveDecimal.create("-3.30")); + v0.vector[2].set(HiveDecimal.create("0")); - v1.vector[0].update("1.00", (short) 2); - v1.vector[1].update("1.00", (short) 2); - v1.vector[2].update("1.00", (short) 2); + 
v1.vector[0].set(HiveDecimal.create("1.00")); + v1.vector[1].set(HiveDecimal.create("1.00")); + v1.vector[2].set(HiveDecimal.create("1.00")); b.size = 3; Index: ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorExpressionWriters.java =================================================================== --- ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorExpressionWriters.java (revision 1637277) +++ ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorExpressionWriters.java (working copy) @@ -84,8 +84,8 @@ return null; } - private Writable getWritableValue(TypeInfo ti, Decimal128 value) { - return new HiveDecimalWritable(HiveDecimal.create(value.toBigDecimal())); + private Writable getWritableValue(TypeInfo ti, HiveDecimal value) { + return new HiveDecimalWritable(value); } private Writable getWritableValue(TypeInfo ti, byte[] value) { @@ -163,7 +163,7 @@ for (int i = 0; i < vectorSize; i++) { Writable w = (Writable) vew.writeValue(dcv, i); if (w != null) { - Writable expected = getWritableValue(type, dcv.vector[i]); + Writable expected = getWritableValue(type, dcv.vector[i].getHiveDecimal()); Assert.assertEquals(expected, w); } else { Assert.assertTrue(dcv.isNull[i]); @@ -182,7 +182,7 @@ values[i] = null; // setValue() should be able to handle null input values[i] = vew.setValue(values[i], dcv, i); if (values[i] != null) { - Writable expected = getWritableValue(type, dcv.vector[i]); + Writable expected = getWritableValue(type, dcv.vector[i].getHiveDecimal()); Assert.assertEquals(expected, values[i]); } else { Assert.assertTrue(dcv.isNull[i]); Index: ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorFilterExpressions.java =================================================================== --- ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorFilterExpressions.java (revision 1637277) +++ ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorFilterExpressions.java (working copy) @@ -24,7 +24,7 @@ import java.sql.Timestamp; -import org.apache.hadoop.hive.common.type.Decimal128; +import org.apache.hadoop.hive.common.type.HiveDecimal; import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector; import org.apache.hadoop.hive.ql.exec.vector.DecimalColumnVector; import org.apache.hadoop.hive.ql.exec.vector.DoubleColumnVector; @@ -830,8 +830,7 @@ @Test public void testFilterDecimalColEqualDecimalScalar() { VectorizedRowBatch b = getVectorizedRowBatch1DecimalCol(); - Decimal128 scalar = new Decimal128(); - scalar.update("-3.30", (short) 2); + HiveDecimal scalar = HiveDecimal.create("-3.30"); VectorExpression expr = new FilterDecimalColEqualDecimalScalar(0, scalar); expr.evaluate(b); @@ -876,8 +875,7 @@ @Test public void testFilterDecimalScalarEqualDecimalColumn() { VectorizedRowBatch b = getVectorizedRowBatch1DecimalCol(); - Decimal128 scalar = new Decimal128(); - scalar.update("-3.30", (short) 2); + HiveDecimal scalar = HiveDecimal.create("-3.30"); VectorExpression expr = new FilterDecimalScalarEqualDecimalColumn(scalar, 0); expr.evaluate(b); @@ -982,8 +980,7 @@ @Test public void testFilterDecimalColLessScalar() { VectorizedRowBatch b = getVectorizedRowBatch1DecimalCol(); - Decimal128 scalar = new Decimal128(); - scalar.update("0", (short) 2); + HiveDecimal scalar = HiveDecimal.create("0"); VectorExpression expr = new FilterDecimalColLessDecimalScalar(0, scalar); expr.evaluate(b); @@ -999,8 +996,7 @@ @Test public void testFilterDecimalScalarGreaterThanColumn() { 
VectorizedRowBatch b = getVectorizedRowBatch1DecimalCol(); - Decimal128 scalar = new Decimal128(); - scalar.update("0", (short) 2); + HiveDecimal scalar = HiveDecimal.create("0"); VectorExpression expr = new FilterDecimalScalarGreaterDecimalColumn(scalar, 0); expr.evaluate(b); @@ -1030,9 +1026,9 @@ VectorizedRowBatch b = new VectorizedRowBatch(1); DecimalColumnVector v0; b.cols[0] = v0 = new DecimalColumnVector(18, 2); - v0.vector[0].update("1.20", (short) 2); - v0.vector[1].update("-3.30", (short) 2); - v0.vector[2].update("0", (short) 2); + v0.vector[0].set(HiveDecimal.create("1.20")); + v0.vector[1].set(HiveDecimal.create("-3.30")); + v0.vector[2].set(HiveDecimal.create("0")); b.size = 3; return b; @@ -1042,14 +1038,14 @@ VectorizedRowBatch b = new VectorizedRowBatch(2); DecimalColumnVector v0, v1; b.cols[0] = v0 = new DecimalColumnVector(18, 2); - v0.vector[0].update("1.20", (short) 2); - v0.vector[1].update("-3.30", (short) 2); - v0.vector[2].update("0", (short) 2); + v0.vector[0].set(HiveDecimal.create("1.20")); + v0.vector[1].set(HiveDecimal.create("-3.30")); + v0.vector[2].set(HiveDecimal.create("0")); b.cols[1] = v1 = new DecimalColumnVector(18, 2); - v1.vector[0].update("-1", (short) 2); - v1.vector[1].update("-3.30", (short) 2); - v1.vector[2].update("10", (short) 2); + v1.vector[0].set(HiveDecimal.create("-1.00")); + v1.vector[1].set(HiveDecimal.create("-3.30")); + v1.vector[2].set(HiveDecimal.create("10.00")); b.size = 3; return b; Index: ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorTypeCasts.java =================================================================== --- ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorTypeCasts.java (revision 1637277) +++ ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorTypeCasts.java (working copy) @@ -28,6 +28,7 @@ import junit.framework.Assert; import org.apache.hadoop.hive.common.type.Decimal128; +import org.apache.hadoop.hive.common.type.HiveDecimal; import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector; import org.apache.hadoop.hive.ql.exec.vector.DecimalColumnVector; import org.apache.hadoop.hive.ql.exec.vector.DoubleColumnVector; @@ -231,7 +232,7 @@ VectorizedRowBatch b = getBatchDecimalLong(); VectorExpression expr = new CastDecimalToBoolean(0, 1); DecimalColumnVector in = (DecimalColumnVector) b.cols[0]; - in.vector[1].update(0); + in.vector[1].set(HiveDecimal.create(0)); expr.evaluate(b); LongColumnVector r = (LongColumnVector) b.cols[1]; assertEquals(1, r.vector[0]); @@ -248,9 +249,9 @@ b.size = 3; - dv.vector[0].update("1.1", scale); - dv.vector[1].update("-2.2", scale); - dv.vector[2].update("9999999999999999.00", scale); + dv.vector[0].set(HiveDecimal.create("1.1").setScale(scale)); + dv.vector[1].set(HiveDecimal.create("-2.2").setScale(scale)); + dv.vector[2].set(HiveDecimal.create("9999999999999999.00").setScale(scale)); return b; } @@ -308,9 +309,9 @@ b.size = 3; - dv.vector[0].update("1.1", scale); - dv.vector[1].update("-2.2", scale); - dv.vector[2].update("9999999999999999.00", scale); + dv.vector[0].set(HiveDecimal.create("1.1").setScale(scale)); + dv.vector[1].set(HiveDecimal.create("-2.2").setScale(scale)); + dv.vector[2].set(HiveDecimal.create("9999999999999999.00").setScale(scale)); return b; } @@ -322,12 +323,13 @@ expr.evaluate(b); BytesColumnVector r = (BytesColumnVector) b.cols[1]; - byte[] v = toBytes("1.1"); + byte[] v = toBytes("1.10"); + assertTrue(((Integer) v.length).toString() + " " + r.length[0], v.length == r.length[0]); 
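// Editorial note (an assumption, not stated in the patch): the expected strings gain
// trailing zeros ("1.1" becomes "1.10", "-2.2" becomes "-2.20") because the inputs are
// now built with setScale(scale) at scale 2, and the cast presumably renders the value
// at its carried scale. Sketch under that assumption:
//   HiveDecimal d = HiveDecimal.create("1.1").setScale(2);
//   String s = d.toString();   // expected to be "1.10" at scale 2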
Assert.assertEquals(0, StringExpr.compare(v, 0, v.length, r.vector[0], r.start[0], r.length[0])); - v = toBytes("-2.2"); + v = toBytes("-2.20"); Assert.assertEquals(0, StringExpr.compare(v, 0, v.length, r.vector[1], r.start[1], r.length[1])); @@ -347,9 +349,9 @@ b.size = 3; - dv.vector[0].update("1.1", scale); - dv.vector[1].update("-2.2", scale); - dv.vector[2].update("9999999999999999.00", scale); + dv.vector[0].set(HiveDecimal.create("1.1").setScale(scale)); + dv.vector[1].set(HiveDecimal.create("-2.2").setScale(scale)); + dv.vector[2].set(HiveDecimal.create("9999999999999999.00").setScale(scale)); return b; } @@ -374,9 +376,9 @@ b.size = 3; - dv.vector[0].update("1.111111111", scale); - dv.vector[1].update("-2.222222222", scale); - dv.vector[2].update("31536000.999999999", scale); + dv.vector[0].set(HiveDecimal.create("1.111111111").setScale(scale)); + dv.vector[1].set(HiveDecimal.create("-2.222222222").setScale(scale)); + dv.vector[2].set(HiveDecimal.create("31536000.999999999").setScale(scale)); return b; } @@ -387,9 +389,9 @@ VectorExpression expr = new CastLongToDecimal(0, 1); expr.evaluate(b); DecimalColumnVector r = (DecimalColumnVector) b.cols[1]; - assertTrue(r.vector[0].equals(new Decimal128(0, (short) 2))); - assertTrue(r.vector[1].equals(new Decimal128(-1, (short) 2))); - assertTrue(r.vector[2].equals(new Decimal128(99999999999999L, (short) 2))); + assertTrue(r.vector[0].getHiveDecimal().equals(HiveDecimal.create("0"))); + assertTrue(r.vector[1].getHiveDecimal().equals(HiveDecimal.create("-1"))); + assertTrue(r.vector[2].getHiveDecimal().equals(HiveDecimal.create("99999999999999"))); } private VectorizedRowBatch getBatchLongDecimal() { @@ -410,9 +412,9 @@ expr.evaluate(b); DecimalColumnVector r = (DecimalColumnVector) b.cols[1]; - assertTrue(r.vector[0].equals(new Decimal128(0, r.scale))); - assertTrue(r.vector[1].equals(new Decimal128(-1, r.scale))); - assertTrue(r.vector[2].equals(new Decimal128("99999999999999.0", r.scale))); + assertTrue(r.vector[0].getHiveDecimal().equals(HiveDecimal.create("0.0"))); + assertTrue(r.vector[1].getHiveDecimal().equals(HiveDecimal.create("-1.0"))); + assertTrue(r.vector[2].getHiveDecimal().equals(HiveDecimal.create("99999999999999"))); } private VectorizedRowBatch getBatchDoubleDecimal() { @@ -437,9 +439,9 @@ VectorExpression expr = new CastStringToDecimal(0, 1); expr.evaluate(b); DecimalColumnVector r = (DecimalColumnVector) b.cols[1]; - assertTrue(r.vector[0].equals(new Decimal128("1.10", r.scale))); - assertTrue(r.vector[1].equals(new Decimal128("-2.20", r.scale))); - assertTrue(r.vector[2].equals(new Decimal128("99999999999999.0", r.scale))); + assertTrue(r.vector[0].getHiveDecimal().equals(HiveDecimal.create("1.10"))); + assertTrue(r.vector[1].getHiveDecimal().equals(HiveDecimal.create("-2.20"))); + assertTrue(r.vector[2].getHiveDecimal().equals(HiveDecimal.create("99999999999999.0"))); } private VectorizedRowBatch getBatchStringDecimal() { @@ -472,9 +474,9 @@ inL.vector[1] = -1990000000L; expr.evaluate(b); DecimalColumnVector r = (DecimalColumnVector) b.cols[1]; - assertTrue(r.vector[0].equals(new Decimal128(0, (short) 2))); - assertTrue(r.vector[1].equals(new Decimal128("-1.99", (short) 2))); - assertTrue(r.vector[2].equals(new Decimal128("100000.00", (short) 2))); + assertTrue(r.vector[0].getHiveDecimal().equals(HiveDecimal.create("0.00"))); + assertTrue(r.vector[1].getHiveDecimal().equals(HiveDecimal.create("-1.99"))); + assertTrue(r.vector[2].getHiveDecimal().equals(HiveDecimal.create("100000.00"))); // Try again with a value that 
won't fit in 5 digits, to make // sure that NULL is produced. @@ -503,6 +505,7 @@ return b; } + /* @Test public void testCastDecimalToDecimal() { @@ -511,7 +514,7 @@ VectorExpression expr = new CastDecimalToDecimal(0, 1); expr.evaluate(b); DecimalColumnVector r = (DecimalColumnVector) b.cols[1]; - assertTrue(r.vector[0].equals(new Decimal128("10.00", (short) 2))); + assertTrue(r.vector[0].getHiveDecimal().equals(HiveDecimal.create("10.00", (short) 2))); assertFalse(r.noNulls); assertTrue(r.isNull[1]); @@ -520,10 +523,11 @@ expr = new CastDecimalToDecimal(1, 0); expr.evaluate(b); r = (DecimalColumnVector) b.cols[0]; - assertTrue(r.vector[0].equals(new Decimal128("100.01", (short) 4))); - assertTrue(r.vector[1].equals(new Decimal128("-200.02", (short) 4))); + assertTrue(r.vector[0].getHiveDecimal().equals(HiveDecimal.create("100.01", (short) 4))); + assertTrue(r.vector[1].getHiveDecimal().equals(HiveDecimal.create("-200.02", (short) 4))); assertTrue(r.noNulls); } + */ private VectorizedRowBatch getBatchDecimalDecimal() { VectorizedRowBatch b = new VectorizedRowBatch(2); @@ -532,11 +536,11 @@ b.cols[0] = v0 = new DecimalColumnVector(18, 4); b.cols[1] = v1 = new DecimalColumnVector(5, 2); - v0.vector[0].update(new Decimal128("10.0001", (short) 4)); - v0.vector[1].update(new Decimal128("-9999999.9999", (short) 4)); + v0.vector[0].set(HiveDecimal.create("10.0001")); + v0.vector[1].set(HiveDecimal.create("-9999999.9999")); - v1.vector[0].update(new Decimal128("100.01", (short) 2)); - v1.vector[1].update(new Decimal128("-200.02", (short) 2)); + v1.vector[0].set(HiveDecimal.create("100.01")); + v1.vector[1].set(HiveDecimal.create("-200.02")); b.size = 2; return b; Index: ql/src/test/org/apache/hadoop/hive/ql/exec/vector/util/FakeVectorRowBatchFromObjectIterables.java =================================================================== --- ql/src/test/org/apache/hadoop/hive/ql/exec/vector/util/FakeVectorRowBatchFromObjectIterables.java (revision 1637277) +++ ql/src/test/org/apache/hadoop/hive/ql/exec/vector/util/FakeVectorRowBatchFromObjectIterables.java (working copy) @@ -26,7 +26,7 @@ import java.util.regex.Matcher; import java.util.regex.Pattern; -import org.apache.hadoop.hive.common.type.Decimal128; +import org.apache.hadoop.hive.common.type.HiveDecimal; import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector; import org.apache.hadoop.hive.ql.exec.vector.ColumnVector; import org.apache.hadoop.hive.ql.exec.vector.DecimalColumnVector; @@ -168,7 +168,7 @@ int row, Object value) { DecimalColumnVector dcv = (DecimalColumnVector) columnVector; - dcv.vector[row] = (Decimal128)value; + dcv.set(row, (HiveDecimal) value); } }; } else { Index: ql/src/test/org/apache/hadoop/hive/ql/exec/vector/util/VectorizedRowGroupGenUtil.java =================================================================== --- ql/src/test/org/apache/hadoop/hive/ql/exec/vector/util/VectorizedRowGroupGenUtil.java (revision 1637277) +++ ql/src/test/org/apache/hadoop/hive/ql/exec/vector/util/VectorizedRowGroupGenUtil.java (working copy) @@ -21,10 +21,12 @@ import java.util.Random; import org.apache.hadoop.hive.common.type.Decimal128; +import org.apache.hadoop.hive.common.type.HiveDecimal; import org.apache.hadoop.hive.ql.exec.vector.DecimalColumnVector; import org.apache.hadoop.hive.ql.exec.vector.DoubleColumnVector; import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector; import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch; +import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable; import 
org.apache.hadoop.hive.serde2.typeinfo.DecimalTypeInfo; public class VectorizedRowGroupGenUtil { @@ -114,10 +116,10 @@ dcv.noNulls = !nulls; dcv.isRepeating = repeating; - Decimal128 repeatingValue = new Decimal128(); + HiveDecimalWritable repeatingValue = new HiveDecimalWritable(); do{ - repeatingValue.update(rand.nextDouble(), (short)typeInfo.scale()); - }while(repeatingValue.doubleValue() == 0); + repeatingValue.set(HiveDecimal.create(((Double) rand.nextDouble()).toString()).setScale((short)typeInfo.scale())); + }while(repeatingValue.getHiveDecimal().doubleValue() == 0); int nullFrequency = generateNullFrequency(rand); @@ -129,12 +131,12 @@ }else { dcv.isNull[i] = false; if (repeating) { - dcv.vector[i].update(repeatingValue); + dcv.vector[i].set(repeatingValue); } else { - dcv.vector[i].update(rand.nextDouble(), (short) typeInfo.scale()); + dcv.vector[i].set(HiveDecimal.create(((Double) rand.nextDouble()).toString()).setScale((short) typeInfo.scale())); } - if(dcv.vector[i].doubleValue() == 0) { + if(dcv.vector[i].getHiveDecimal().doubleValue() == 0) { i--; } } Index: ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java =================================================================== --- ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java (revision 1637277) +++ ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java (working copy) @@ -51,7 +51,6 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; -import org.apache.hadoop.hive.common.type.Decimal128; import org.apache.hadoop.hive.common.type.HiveDecimal; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants; @@ -1466,8 +1465,8 @@ assertEquals("checking double " + i, i, doubleCoulmn.vector[i], 0.0001); assertEquals("checking string " + i, new Text(Long.toHexString(i)), stringColumn.getWritableObject(i)); - assertEquals("checking decimal " + i, new Decimal128(i), - decimalColumn.vector[i]); + assertEquals("checking decimal " + i, HiveDecimal.create(i), + decimalColumn.vector[i].getHiveDecimal()); assertEquals("checking date " + i, i, dateColumn.vector[i]); long millis = (long) i * MILLIS_IN_DAY; millis -= LOCAL_TIMEZONE.getOffset(millis); Index: ql/src/test/org/apache/hadoop/hive/ql/io/parquet/serde/TestParquetTimestampUtils.java =================================================================== --- ql/src/test/org/apache/hadoop/hive/ql/io/parquet/serde/TestParquetTimestampUtils.java (revision 1637277) +++ ql/src/test/org/apache/hadoop/hive/ql/io/parquet/serde/TestParquetTimestampUtils.java (working copy) @@ -143,22 +143,23 @@ cal.set(Calendar.YEAR, 1968); cal.set(Calendar.MONTH, Calendar.MAY); cal.set(Calendar.DAY_OF_MONTH, 23); - if ((TimeZone.getTimeZone("US/Pacific").inDaylightTime(new Date()))) { - cal.set(Calendar.HOUR_OF_DAY, 18); - } else { - cal.set(Calendar.HOUR_OF_DAY, 17); - } + cal.set(Calendar.HOUR_OF_DAY, 17); cal.set(Calendar.MINUTE, 1); cal.set(Calendar.SECOND, 1); cal.setTimeZone(TimeZone.getTimeZone("US/Pacific")); Timestamp ts = new Timestamp(cal.getTimeInMillis()); ts.setNanos(1); - //18:00 PST = 01:00 GMT (if daylight-savings) - //17:00 PST = 01:00 GMT (if not daylight savings) - //(1*60*60 + 1*60 + 1)*10e9 + 1 + /** + * 17:00 PDT = 00:00 GMT (daylight-savings) + * (0*60*60 + 1*60 + 1)*10e9 + 1 = 61000000001, or + * + * 17:00 PST = 01:00 GMT (if not daylight savings) + * (1*60*60 + 1*60 + 1)*10e9 + 1 = 3661000000001 + */ 
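// Editorial note: the removed branch picked the expected hour from whether the test
// run date was in daylight time (inDaylightTime(new Date())), not the fixed
// 1968-05-23 date under test, so the assertion flipped with the calendar. The hunk
// pins the local hour at 17:00 and accepts both possible GMT encodings. Restating
// the comment's arithmetic with the nanosecond factor written as 1e9 (its "10e9"
// reads as a typo):
//   long pdt = (0L * 3600 + 1 * 60 + 1) * 1_000_000_000L + 1; //   61000000001L (17:00 PDT -> 00:01:01 GMT)
//   long pst = (1L * 3600 + 1 * 60 + 1) * 1_000_000_000L + 1; // 3661000000001L (17:00 PST -> 01:01:01 GMT)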
NanoTime nt = NanoTimeUtils.getNanoTime(ts); - Assert.assertEquals(nt.getTimeOfDayNanos(), 3661000000001L); + long timeOfDayNanos = nt.getTimeOfDayNanos(); + Assert.assertTrue(timeOfDayNanos == 61000000001L || timeOfDayNanos == 3661000000001L); //in both cases, this will be the next day in GMT Assert.assertEquals(nt.getJulianDay(), 2440001); Index: ql/src/test/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/TestSQLStdHiveAccessControllerHS2.java =================================================================== --- ql/src/test/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/TestSQLStdHiveAccessControllerHS2.java (revision 1637277) +++ ql/src/test/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/TestSQLStdHiveAccessControllerHS2.java (working copy) @@ -87,7 +87,8 @@ // create list with variables that match some of the regexes List confVarRegexList = Arrays.asList("hive.convert.join.bucket.mapjoin.tez", - "hive.optimize.index.filter.compact.maxsize", "hive.tez.dummy", "tez.task.dummy"); + "hive.optimize.index.filter.compact.maxsize", "hive.tez.dummy", "tez.task.dummy", + "hive.exec.dynamic.partition", "hive.exec.dynamic.partition.mode"); // combine two lists List varList = new ArrayList(); Index: ql/src/test/queries/clientnegative/ptf_negative_AggrFuncsWithNoGBYNoPartDef.q =================================================================== --- ql/src/test/queries/clientnegative/ptf_negative_AggrFuncsWithNoGBYNoPartDef.q (revision 1637277) +++ ql/src/test/queries/clientnegative/ptf_negative_AggrFuncsWithNoGBYNoPartDef.q (working copy) @@ -1,19 +1,3 @@ -DROP TABLE part; - -CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -); - -LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part; - -- testAggrFuncsWithNoGBYNoPartDef select p_mfgr, sum(p_retailprice) as s1 Index: ql/src/test/queries/clientnegative/ptf_negative_AmbiguousWindowDefn.q =================================================================== --- ql/src/test/queries/clientnegative/ptf_negative_AmbiguousWindowDefn.q (revision 1637277) +++ ql/src/test/queries/clientnegative/ptf_negative_AmbiguousWindowDefn.q (working copy) @@ -1,19 +1,3 @@ -DROP TABLE part; - -CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -); - -LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part; - -- testAmbiguousWindowDefn select p_mfgr, p_name, p_size, sum(p_size) over (w1) as s1, Index: ql/src/test/queries/clientnegative/ptf_negative_DistributeByOrderBy.q =================================================================== --- ql/src/test/queries/clientnegative/ptf_negative_DistributeByOrderBy.q (revision 1637277) +++ ql/src/test/queries/clientnegative/ptf_negative_DistributeByOrderBy.q (working copy) @@ -1,17 +1,3 @@ -DROP TABLE part; - -CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -); - -- testPartitonBySortBy select p_mfgr, p_name, p_size, sum(p_retailprice) over (distribute by p_mfgr order by p_mfgr) as s1 Index: ql/src/test/queries/clientnegative/ptf_negative_DuplicateWindowAlias.q =================================================================== --- 
ql/src/test/queries/clientnegative/ptf_negative_DuplicateWindowAlias.q (revision 1637277) +++ ql/src/test/queries/clientnegative/ptf_negative_DuplicateWindowAlias.q (working copy) @@ -1,17 +1,3 @@ -DROP TABLE part; - -CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -); - -- testDuplicateWindowAlias select p_mfgr, p_name, p_size, sum(p_size) over (w1) as s1, Index: ql/src/test/queries/clientnegative/ptf_negative_HavingLeadWithNoGBYNoWindowing.q =================================================================== --- ql/src/test/queries/clientnegative/ptf_negative_HavingLeadWithNoGBYNoWindowing.q (revision 1637277) +++ ql/src/test/queries/clientnegative/ptf_negative_HavingLeadWithNoGBYNoWindowing.q (working copy) @@ -1,17 +1,3 @@ -DROP TABLE part; - -CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -); - -- testHavingLeadWithNoGBYNoWindowing select p_mfgr,p_name, p_size from part Index: ql/src/test/queries/clientnegative/ptf_negative_HavingLeadWithPTF.q =================================================================== --- ql/src/test/queries/clientnegative/ptf_negative_HavingLeadWithPTF.q (revision 1637277) +++ ql/src/test/queries/clientnegative/ptf_negative_HavingLeadWithPTF.q (working copy) @@ -1,17 +1,3 @@ -DROP TABLE part; - -CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -); - -- testHavingLeadWithPTF select p_mfgr,p_name, p_size from noop(on part Index: ql/src/test/queries/clientnegative/ptf_negative_InvalidValueBoundary.q =================================================================== --- ql/src/test/queries/clientnegative/ptf_negative_InvalidValueBoundary.q (revision 1637277) +++ ql/src/test/queries/clientnegative/ptf_negative_InvalidValueBoundary.q (working copy) @@ -1,21 +1,6 @@ -DROP TABLE part; - -CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING, - p_complex array -); - -- testInvalidValueBoundary select p_mfgr,p_name, p_size, sum(p_size) over (w1) as s , dense_rank() over(w1) as dr -from part +from part window w1 as (partition by p_mfgr order by p_complex range between 2 preceding and current row); Index: ql/src/test/queries/clientnegative/ptf_negative_JoinWithAmbigousAlias.q =================================================================== --- ql/src/test/queries/clientnegative/ptf_negative_JoinWithAmbigousAlias.q (revision 1637277) +++ ql/src/test/queries/clientnegative/ptf_negative_JoinWithAmbigousAlias.q (working copy) @@ -1,20 +1,6 @@ -DROP TABLE part; - -CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -); - -- testJoinWithAmbigousAlias select abc.* -from noop(on part +from noop(on part partition by p_mfgr order by p_name ) abc join part on abc.p_partkey = p1.p_partkey; Index: ql/src/test/queries/clientnegative/ptf_negative_PartitionBySortBy.q =================================================================== --- ql/src/test/queries/clientnegative/ptf_negative_PartitionBySortBy.q (revision 1637277) 
+++ ql/src/test/queries/clientnegative/ptf_negative_PartitionBySortBy.q (working copy) @@ -1,19 +1,5 @@ -DROP TABLE part; - -CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -); - -- testPartitonBySortBy select p_mfgr, p_name, p_size, sum(p_retailprice) over (partition by p_mfgr sort by p_mfgr) as s1 -from part +from part ; Index: ql/src/test/queries/clientnegative/ptf_negative_WhereWithRankCond.q =================================================================== --- ql/src/test/queries/clientnegative/ptf_negative_WhereWithRankCond.q (revision 1637277) +++ ql/src/test/queries/clientnegative/ptf_negative_WhereWithRankCond.q (working copy) @@ -1,17 +1,3 @@ -DROP TABLE part; - -CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -); - -- testWhereWithRankCond select p_mfgr,p_name, p_size, rank() over() as r Index: ql/src/test/queries/clientnegative/ptf_window_boundaries.q =================================================================== --- ql/src/test/queries/clientnegative/ptf_window_boundaries.q (revision 1637277) +++ ql/src/test/queries/clientnegative/ptf_window_boundaries.q (working copy) @@ -1,16 +1,3 @@ --- data setup -CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -); - select p_mfgr, p_name, p_size, sum(p_retailprice) over (rows unbounded following) as s1 from part distribute by p_mfgr sort by p_name; Index: ql/src/test/queries/clientnegative/ptf_window_boundaries2.q =================================================================== --- ql/src/test/queries/clientnegative/ptf_window_boundaries2.q (revision 1637277) +++ ql/src/test/queries/clientnegative/ptf_window_boundaries2.q (working copy) @@ -1,16 +1,3 @@ --- data setup -CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -); - select p_mfgr, p_name, p_size, sum(p_retailprice) over (range unbounded following) as s1 from part distribute by p_mfgr sort by p_name; Index: ql/src/test/queries/clientnegative/subquery_nested_subquery.q =================================================================== --- ql/src/test/queries/clientnegative/subquery_nested_subquery.q (revision 1637277) +++ ql/src/test/queries/clientnegative/subquery_nested_subquery.q (working copy) @@ -1,17 +1,3 @@ - - -CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -); - select * from part x where x.p_name in (select y.p_name from part y where exists (select z.p_name from part z where y.p_name = z.p_name)) Index: ql/src/test/queries/clientnegative/subquery_windowing_corr.q =================================================================== --- ql/src/test/queries/clientnegative/subquery_windowing_corr.q (revision 1637277) +++ ql/src/test/queries/clientnegative/subquery_windowing_corr.q (working copy) @@ -1,21 +1,3 @@ -DROP TABLE part; - --- data setup -CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice 
DOUBLE, - p_comment STRING -); - -LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part; - - -- corr and windowing select p_mfgr, p_name, p_size from part a Index: ql/src/test/queries/clientnegative/windowing_leadlag_in_udaf.q =================================================================== --- ql/src/test/queries/clientnegative/windowing_leadlag_in_udaf.q (revision 1637277) +++ ql/src/test/queries/clientnegative/windowing_leadlag_in_udaf.q (working copy) @@ -1,15 +1 @@ -DROP TABLE part; - -CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -); - select sum(lead(p_retailprice,1)) as s1 from part; Index: ql/src/test/queries/clientnegative/windowing_ll_no_neg.q =================================================================== --- ql/src/test/queries/clientnegative/windowing_ll_no_neg.q (revision 1637277) +++ ql/src/test/queries/clientnegative/windowing_ll_no_neg.q (working copy) @@ -1,21 +1,3 @@ -DROP TABLE IF EXISTS part; - --- data setup -CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -); - -LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part; - - select p_mfgr, p_name, p_size, min(p_retailprice), rank() over(distribute by p_mfgr sort by p_name)as r, Index: ql/src/test/queries/clientnegative/windowing_ll_no_over.q =================================================================== --- ql/src/test/queries/clientnegative/windowing_ll_no_over.q (revision 1637277) +++ ql/src/test/queries/clientnegative/windowing_ll_no_over.q (working copy) @@ -1,17 +1,3 @@ -DROP TABLE part; - -CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -); - select p_mfgr, lead(p_retailprice,1) as s1 from part; Index: ql/src/test/queries/clientpositive/auto_join33.q =================================================================== --- ql/src/test/queries/clientpositive/auto_join33.q (revision 0) +++ ql/src/test/queries/clientpositive/auto_join33.q (working copy) @@ -0,0 +1,16 @@ +set hive.auto.convert.join = true; + +-- SORT_QUERY_RESULTS + +explain +SELECT * FROM + (SELECT * FROM src WHERE key+1 < 10) a + JOIN + (SELECT * FROM src WHERE key+2 < 10) b + ON a.key+1=b.key+2; + +SELECT * FROM + (SELECT * FROM src WHERE key+1 < 10) a + JOIN + (SELECT * FROM src WHERE key+2 < 10) b + ON a.key+1=b.key+2; Index: ql/src/test/queries/clientpositive/cbo_correctness.q =================================================================== --- ql/src/test/queries/clientpositive/cbo_correctness.q (revision 1637277) +++ ql/src/test/queries/clientpositive/cbo_correctness.q (working copy) @@ -1,488 +0,0 @@ -set hive.cbo.enable=true; -set hive.exec.check.crossproducts=false; - -drop table if exists t1; -drop table if exists t2; -drop table if exists t3; - -create table t1(key string, value string, c_int int, c_float float, c_boolean boolean) partitioned by (dt string) row format delimited fields terminated by ',' STORED AS TEXTFILE; -create table t2(key string, value string, c_int int, c_float float, c_boolean boolean) partitioned by (dt string) row format delimited fields terminated by ',' STORED AS TEXTFILE; -create table t3(key string, value string, c_int int, c_float float, 
c_boolean boolean) row format delimited fields terminated by ',' STORED AS TEXTFILE; - -load data local inpath '../../data/files/cbo_t1.txt' into table t1 partition (dt='2014'); -load data local inpath '../../data/files/cbo_t2.txt' into table t2 partition (dt='2014'); -load data local inpath '../../data/files/cbo_t3.txt' into table t3; - -CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -); - -LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part; - -DROP TABLE lineitem; -CREATE TABLE lineitem (L_ORDERKEY INT, - L_PARTKEY INT, - L_SUPPKEY INT, - L_LINENUMBER INT, - L_QUANTITY DOUBLE, - L_EXTENDEDPRICE DOUBLE, - L_DISCOUNT DOUBLE, - L_TAX DOUBLE, - L_RETURNFLAG STRING, - L_LINESTATUS STRING, - l_shipdate STRING, - L_COMMITDATE STRING, - L_RECEIPTDATE STRING, - L_SHIPINSTRUCT STRING, - L_SHIPMODE STRING, - L_COMMENT STRING) -ROW FORMAT DELIMITED -FIELDS TERMINATED BY '|'; - -LOAD DATA LOCAL INPATH '../../data/files/lineitem.txt' OVERWRITE INTO TABLE lineitem; - -create table src_cbo as select * from src; - - -set hive.stats.dbclass=jdbc:derby; -analyze table t1 partition (dt) compute statistics; -analyze table t1 compute statistics for columns key, value, c_int, c_float, c_boolean; -analyze table t2 partition (dt) compute statistics; -analyze table t2 compute statistics for columns key, value, c_int, c_float, c_boolean; -analyze table t3 compute statistics; -analyze table t3 compute statistics for columns key, value, c_int, c_float, c_boolean; -analyze table src_cbo compute statistics; -analyze table src_cbo compute statistics for columns; -analyze table part compute statistics; -analyze table part compute statistics for columns; -analyze table lineitem compute statistics; -analyze table lineitem compute statistics for columns; - -set hive.stats.fetch.column.stats=true; -set hive.auto.convert.join=false; - --- 1. Test Select + TS -select * from t1; -select * from t1 as t1; -select * from t1 as t2; - -select t1.key as x, c_int as c_int, (((c_int+c_float)*10)+5) as y from t1; - --- 2. 
Test Select + TS + FIL -select * from t1 where t1.c_int >= 0; -select * from t1 as t1 where t1.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100; -select * from t1 as t2 where t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100; - -select t2.key as x, c_int as c_int, (((c_int+c_float)*10)+5) as y from t1 as t2 where t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100; - --- 3 Test Select + Select + TS + FIL -select * from (select * from t1 where t1.c_int >= 0) as t1; -select * from (select * from t1 as t1 where t1.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as t1; -select * from (select * from t1 as t2 where t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as t1; -select * from (select t2.key as x, c_int as c_int, (((c_int+c_float)*10)+5) as y from t1 as t2 where t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as t1; - -select * from (select * from t1 where t1.c_int >= 0) as t1 where t1.c_int >= 0; -select * from (select * from t1 as t1 where t1.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as t1 where t1.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100; -select * from (select * from t1 as t2 where t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as t2 where t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100; -select * from (select t2.key as x, c_int as c_int, (((c_int+c_float)*10)+5) as y from t1 as t2 where t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as t1 where t1.c_int >= 0 and y+c_int >= 0 or x <= 100; - -select t1.c_int+c_float as x , c_int as c_int, (((c_int+c_float)*10)+5) as y from (select * from t1 where t1.c_int >= 0) as t1 where t1.c_int >= 0; -select t2.c_int+c_float as x , c_int as c_int, (((c_int+c_float)*10)+5) as y from (select * from t1 where t1.c_int >= 0) as t2 where t2.c_int >= 0; - --- 4. 
Test Select + Join + TS -select t1.c_int, t2.c_int from t1 join t2 on t1.key=t2.key; -select t1.key from t1 join t3; -select t1.key from t1 join t3 where t1.key=t3.key and t1.key >= 1; -select t1.c_int, t2.c_int from t1 left outer join t2 on t1.key=t2.key; -select t1.c_int, t2.c_int from t1 right outer join t2 on t1.key=t2.key; -select t1.c_int, t2.c_int from t1 full outer join t2 on t1.key=t2.key; - -select b, t1.c, t2.p, q, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1) t1 join (select t2.key as p, t2.c_int as q, c_float as r from t2) t2 on t1.a=p join t3 on t1.a=key; -select key, t1.c_int, t2.p, q from t1 join (select t2.key as p, t2.c_int as q, c_float as r from t2) t2 on t1.key=p join (select key as a, c_int as b, t3.c_float as c from t3)t3 on t1.key=a; -select a, t1.b, key, t2.c_int, t3.p from (select key as a, c_int as b, t1.c_float as c from t1) t1 join t2 on t1.a=key join (select key as p, c_int as q, t3.c_float as r from t3)t3 on t1.a=t3.p; -select b, t1.c, t2.c_int, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1) t1 join t2 on t1.a=t2.key join t3 on t1.a=t3.key; -select t3.c_int, b, t2.c_int, t1.c from (select key as a, c_int as b, t1.c_float as c from t1) t1 join t2 on t1.a=t2.key join t3 on t1.a=t3.key; - -select b, t1.c, t2.p, q, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1) t1 left outer join (select t2.key as p, t2.c_int as q, c_float as r from t2) t2 on t1.a=p join t3 on t1.a=key; -select key, t1.c_int, t2.p, q from t1 join (select t2.key as p, t2.c_int as q, c_float as r from t2) t2 on t1.key=p left outer join (select key as a, c_int as b, t3.c_float as c from t3)t3 on t1.key=a; - -select b, t1.c, t2.p, q, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1) t1 right outer join (select t2.key as p, t2.c_int as q, c_float as r from t2) t2 on t1.a=p join t3 on t1.a=key; -select key, t1.c_int, t2.p, q from t1 join (select t2.key as p, t2.c_int as q, c_float as r from t2) t2 on t1.key=p right outer join (select key as a, c_int as b, t3.c_float as c from t3)t3 on t1.key=a; - -select b, t1.c, t2.p, q, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1) t1 full outer join (select t2.key as p, t2.c_int as q, c_float as r from t2) t2 on t1.a=p join t3 on t1.a=key; -select key, t1.c_int, t2.p, q from t1 join (select t2.key as p, t2.c_int as q, c_float as r from t2) t2 on t1.key=p full outer join (select key as a, c_int as b, t3.c_float as c from t3)t3 on t1.key=a; - --- 5. 
Test Select + Join + FIL + TS -select t1.c_int, t2.c_int from t1 join t2 on t1.key=t2.key where (t1.c_int + t2.c_int == 2) and (t1.c_int > 0 or t2.c_float >= 0); -select t1.c_int, t2.c_int from t1 left outer join t2 on t1.key=t2.key where (t1.c_int + t2.c_int == 2) and (t1.c_int > 0 or t2.c_float >= 0); -select t1.c_int, t2.c_int from t1 right outer join t2 on t1.key=t2.key where (t1.c_int + t2.c_int == 2) and (t1.c_int > 0 or t2.c_float >= 0); -select t1.c_int, t2.c_int from t1 full outer join t2 on t1.key=t2.key where (t1.c_int + t2.c_int == 2) and (t1.c_int > 0 or t2.c_float >= 0); - -select b, t1.c, t2.p, q, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or t2.q >= 0); - -select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 left outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0); - - - -select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 right outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0); - -select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 full outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0); - -select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 full outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0); - - -select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 left outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p left outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0); - -select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 left outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p right outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0); - -select * from (select q, b, t2.p, t1.c, t3.c_int from (select 
key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 left outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p full outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0); - - - -select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 right outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p right outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0); - -select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 right outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p left outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0); - -select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 right outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p full outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0); - - - -select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 full outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p full outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0); - -select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 full outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p left outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0); - -select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 full outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p right outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0); - - --- 6. 
Test Select + TS + Join + Fil + GB + GB Having -select * from t1 group by c_int; -select key, (c_int+1)+2 as x, sum(c_int) from t1 group by c_float, t1.c_int, key; -select * from (select key, (c_int+c_float+1+2) as x, sum(c_int) as y from t1 group by c_float, t1.c_int, key) R group by y, x; - -select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key order by a) t1 join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key order by q/10 desc, r asc) t2 on t1.a=p join t3 on t1.a=key where (b + t2.q >= 0) and (b > 0 or c_int >= 0) group by t3.c_int, c order by t3.c_int+c desc, c; - -select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key having t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by b % c asc, b desc) t1 left outer join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key having t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0) t2 on t1.a=p left outer join t3 on t1.a=key where (b + t2.q >= 0) and (b > 0 or c_int >= 0) group by t3.c_int, c having t3.c_int > 0 and (c_int >=1 or c >= 1) and (c_int + c) >= 0 order by t3.c_int % c asc, t3.c_int desc; - -select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key having t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by b+c, a desc) t1 right outer join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key having t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0) t2 on t1.a=p right outer join t3 on t1.a=key where (b + t2.q >= 2) and (b > 0 or c_int >= 0) group by t3.c_int, c; - -select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key having t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by c+a desc) t1 full outer join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key having t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by p+q desc, r asc) t2 on t1.a=p full outer join t3 on t1.a=key where (b + t2.q >= 0) and (b > 0 or c_int >= 0) group by t3.c_int, c having t3.c_int > 0 and (c_int >=1 or c >= 1) and (c_int + c) >= 0 order by t3.c_int; - -select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key having t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0) t1 join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key having t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0) t2 on t1.a=p join t3 on t1.a=key where (b + t2.q >= 0) and (b > 0 or c_int >= 0) group by t3.c_int, c; - --- 7. 
Test Select + TS + Join + Fil + GB + GB Having + Limit -select * from t1 group by c_int limit 1; -select key, (c_int+1)+2 as x, sum(c_int) from t1 group by c_float, t1.c_int, key order by x limit 1; -select * from (select key, (c_int+c_float+1+2) as x, sum(c_int) as y from t1 group by c_float, t1.c_int, key) R group by y, x order by x,y limit 1; -select key from(select key from (select key from t1 limit 5)t2 limit 5)t3 limit 5; -select key, c_int from(select key, c_int from (select key, c_int from t1 order by c_int limit 5)t1 order by c_int limit 5)t2 order by c_int limit 5; - -select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key order by a limit 5) t1 join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key order by q/10 desc, r asc limit 5) t2 on t1.a=p join t3 on t1.a=key where (b + t2.q >= 0) and (b > 0 or c_int >= 0) group by t3.c_int, c order by t3.c_int+c desc, c limit 5; - -select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key having t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by b % c asc, b desc limit 5) t1 left outer join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key having t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 limit 5) t2 on t1.a=p left outer join t3 on t1.a=key where (b + t2.q >= 0) and (b > 0 or c_int >= 0) group by t3.c_int, c having t3.c_int > 0 and (c_int >=1 or c >= 1) and (c_int + c) >= 0 order by t3.c_int % c asc, t3.c_int desc limit 5; - --- 8. Test UDF/UDAF -select count(*), count(c_int), sum(c_int), avg(c_int), max(c_int), min(c_int) from t1; -select count(*), count(c_int), sum(c_int), avg(c_int), max(c_int), min(c_int), case c_int when 0 then 1 when 1 then 2 else 3 end, sum(case c_int when 0 then 1 when 1 then 2 else 3 end) from t1 group by c_int; -select * from (select count(*) as a, count(distinct c_int) as b, sum(c_int) as c, avg(c_int) as d, max(c_int) as e, min(c_int) as f from t1) t1; -select * from (select count(*) as a, count(distinct c_int) as b, sum(c_int) as c, avg(c_int) as d, max(c_int) as e, min(c_int) as f, case c_int when 0 then 1 when 1 then 2 else 3 end as g, sum(case c_int when 0 then 1 when 1 then 2 else 3 end) as h from t1 group by c_int) t1; -select f,a,e,b from (select count(*) as a, count(c_int) as b, sum(c_int) as c, avg(c_int) as d, max(c_int) as e, min(c_int) as f from t1) t1; -select f,a,e,b from (select count(*) as a, count(distinct c_int) as b, sum(distinct c_int) as c, avg(distinct c_int) as d, max(distinct c_int) as e, min(distinct c_int) as f from t1) t1; -select count(c_int) as a, avg(c_float), key from t1 group by key; -select count(distinct c_int) as a, avg(c_float) from t1 group by c_float; -select count(distinct c_int) as a, avg(c_float) from t1 group by c_int; -select count(distinct c_int) as a, avg(c_float) from t1 group by c_float, c_int; - --- 9. 
Test Windowing Functions -select count(c_int) over() from t1; -select count(c_int) over(), sum(c_float) over(), max(c_int) over(), min(c_int) over(), row_number() over(), rank() over(), dense_rank() over(), percent_rank() over(), lead(c_int, 2, c_int) over(), lag(c_float, 2, c_float) over() from t1; -select * from (select count(c_int) over(), sum(c_float) over(), max(c_int) over(), min(c_int) over(), row_number() over(), rank() over(), dense_rank() over(), percent_rank() over(), lead(c_int, 2, c_int) over(), lag(c_float, 2, c_float) over() from t1) t1; -select x from (select count(c_int) over() as x, sum(c_float) over() from t1) t1; -select 1+sum(c_int) over() from t1; -select sum(c_int)+sum(sum(c_int)) over() from t1; -select * from (select max(c_int) over (partition by key order by value Rows UNBOUNDED PRECEDING), min(c_int) over (partition by key order by value rows current row), count(c_int) over(partition by key order by value ROWS 1 PRECEDING), avg(value) over (partition by key order by value Rows between unbounded preceding and unbounded following), sum(value) over (partition by key order by value rows between unbounded preceding and current row), avg(c_float) over (partition by key order by value Rows between 1 preceding and unbounded following), sum(c_float) over (partition by key order by value rows between 1 preceding and current row), max(c_float) over (partition by key order by value rows between 1 preceding and unbounded following), min(c_float) over (partition by key order by value rows between 1 preceding and 1 following) from t1) t1; -select i, a, h, b, c, d, e, f, g, a as x, a +1 as y from (select max(c_int) over (partition by key order by value range UNBOUNDED PRECEDING) a, min(c_int) over (partition by key order by value range current row) b, count(c_int) over(partition by key order by value range 1 PRECEDING) c, avg(value) over (partition by key order by value range between unbounded preceding and unbounded following) d, sum(value) over (partition by key order by value range between unbounded preceding and current row) e, avg(c_float) over (partition by key order by value range between 1 preceding and unbounded following) f, sum(c_float) over (partition by key order by value range between 1 preceding and current row) g, max(c_float) over (partition by key order by value range between 1 preceding and unbounded following) h, min(c_float) over (partition by key order by value range between 1 preceding and 1 following) i from t1) t1; - --- 10. 
Test views -create view v1 as select c_int, value, c_boolean, dt from t1; -create view v2 as select c_int, value from t2; - -select value from v1 where c_boolean=false; -select max(c_int) from v1 group by (c_boolean); - -select count(v1.c_int) from v1 join t2 on v1.c_int = t2.c_int; -select count(v1.c_int) from v1 join v2 on v1.c_int = v2.c_int; - -select count(*) from v1 a join v1 b on a.value = b.value; - -create view v3 as select v1.value val from v1 join t1 on v1.c_boolean = t1.c_boolean; - -select count(val) from v3 where val != '1'; -with q1 as ( select key from t1 where key = '1') -select count(*) from q1; - -with q1 as ( select value from v1 where c_boolean = false) -select count(value) from q1 ; - -create view v4 as -with q1 as ( select key,c_int from t1 where key = '1') -select * from q1 -; - -with q1 as ( select c_int from q2 where c_boolean = false), -q2 as ( select c_int,c_boolean from v1 where value = '1') -select sum(c_int) from (select c_int from q1) a; - -with q1 as ( select t1.c_int c_int from q2 join t1 where q2.c_int = t1.c_int and t1.dt='2014'), -q2 as ( select c_int,c_boolean from v1 where value = '1' or dt = '14') -select count(*) from q1 join q2 join v4 on q1.c_int = q2.c_int and v4.c_int = q2.c_int; - - -drop view v1; -drop view v2; -drop view v3; -drop view v4; - --- 11. Union All -select * from t1 union all select * from t2 order by key, c_boolean, value, dt; -select key from (select key, c_int from (select * from t1 union all select * from t2 where t2.key >=0)r1 union all select key, c_int from t3)r2 where key >=0 order by key; -select r2.key from (select key, c_int from (select key, c_int from t1 union all select key, c_int from t3 )r1 union all select key, c_int from t3)r2 join (select key, c_int from (select * from t1 union all select * from t2 where t2.key >=0)r1 union all select key, c_int from t3)r3 on r2.key=r3.key where r3.key >=0 order by r2.key; - --- 12. 
SemiJoin -select t1.c_int from t1 left semi join t2 on t1.key=t2.key; -select t1.c_int from t1 left semi join t2 on t1.key=t2.key where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0); -select * from (select c, b, a from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 left semi join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p left semi join t3 on t1.a=key where (b + 1 == 2) and (b > 0 or c >= 0)) R where (b + 1 = 2) and (R.b > 0 or c >= 0); -select * from (select t3.c_int, t1.c, b from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 = 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 left semi join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p left outer join t3 on t1.a=key where (b + t3.c_int == 2) and (b > 0 or c_int >= 0)) R where (R.c_int + 1 = 2) and (R.b > 0 or c_int >= 0); -select * from (select c_int, b, t1.c from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 left semi join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p right outer join t3 on t1.a=key where (b + 1 == 2) and (b > 0 or c_int >= 0)) R where (c + 1 = 2) and (R.b > 0 or c_int >= 0); -select * from (select c_int, b, t1.c from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 left semi join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p full outer join t3 on t1.a=key where (b + 1 == 2) and (b > 0 or c_int >= 0)) R where (c + 1 = 2) and (R.b > 0 or c_int >= 0); -select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key having t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by a+b desc, c asc) t1 left semi join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key having t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by q+r/10 desc, p) t2 on t1.a=p left semi join t3 on t1.a=key where (b + 1 >= 0) and (b > 0 or a >= 0) group by a, c having a > 0 and (a >=1 or c >= 1) and (a + c) >= 0 order by c, a; -select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key having t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by a+b desc, c asc limit 5) t1 left semi join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key having t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by q+r/10 desc, p limit 5) t2 on t1.a=p left semi join t3 on t1.a=key where (b + 1 >= 0) and (b > 0 or a >= 0) group by a, c having a > 0 and (a >=1 or c >= 1) and (a + c) >= 0 order by c, a; - --- 13. null expr in select list -select null from t3; - --- 14. unary operator -select key from t1 where c_int = -6 or c_int = +6; - --- 15. 
query referencing only partition columns -select count(t1.dt) from t1 join t2 on t1.dt = t2.dt where t1.dt = '2014' ; - --- 16. SubQueries Not In --- non agg, non corr -select * -from src_cbo -where src_cbo.key not in - ( select key from src_cbo s1 - where s1.key > '2' - ) order by key -; - --- non agg, corr -select p_mfgr, b.p_name, p_size -from part b -where b.p_name not in - (select p_name - from (select p_mfgr, p_name, p_size as r from part) a - where r < 10 and b.p_mfgr = a.p_mfgr - ) -; - --- agg, non corr -select p_name, p_size -from -part where part.p_size not in - (select avg(p_size) - from (select p_size from part) a - where p_size < 10 - ) order by p_name -; - --- agg, corr -select p_mfgr, p_name, p_size -from part b where b.p_size not in - (select min(p_size) - from (select p_mfgr, p_size from part) a - where p_size < 10 and b.p_mfgr = a.p_mfgr - ) order by p_name -; - --- non agg, non corr, Group By in Parent Query -select li.l_partkey, count(*) -from lineitem li -where li.l_linenumber = 1 and - li.l_orderkey not in (select l_orderkey from lineitem where l_shipmode = 'AIR') -group by li.l_partkey -; - --- add null check test from sq_notin.q once HIVE-7721 resolved. - --- non agg, corr, having -select b.p_mfgr, min(p_retailprice) -from part b -group by b.p_mfgr -having b.p_mfgr not in - (select p_mfgr - from (select p_mfgr, min(p_retailprice) l, max(p_retailprice) r, avg(p_retailprice) a from part group by p_mfgr) a - where min(p_retailprice) = l and r - l > 600 - ) - order by b.p_mfgr -; - --- agg, non corr, having -select b.p_mfgr, min(p_retailprice) -from part b -group by b.p_mfgr -having b.p_mfgr not in - (select p_mfgr - from part a - group by p_mfgr - having max(p_retailprice) - min(p_retailprice) > 600 - ) - order by b.p_mfgr -; - --- 17. SubQueries In --- non agg, non corr -select * -from src_cbo -where src_cbo.key in (select key from src_cbo s1 where s1.key > '9') -; - --- agg, corr --- add back once rank issue fixed for cbo - --- distinct, corr -select * -from src_cbo b -where b.key in - (select distinct a.key - from src_cbo a - where b.value = a.value and a.key > '9' - ) -; - --- non agg, corr, with join in Parent Query -select p.p_partkey, li.l_suppkey -from (select distinct l_partkey as p_partkey from lineitem) p join lineitem li on p.p_partkey = li.l_partkey -where li.l_linenumber = 1 and - li.l_orderkey in (select l_orderkey from lineitem where l_shipmode = 'AIR' and l_linenumber = li.l_linenumber) -; - --- where and having --- Plan is: --- Stage 1: b semijoin sq1:src_cbo (subquery in where) --- Stage 2: group by Stage 1 o/p --- Stage 5: group by on sq2:src_cbo (subquery in having) --- Stage 6: Stage 2 o/p semijoin Stage 5 -select key, value, count(*) -from src_cbo b -where b.key in (select key from src_cbo where src_cbo.key > '8') -group by key, value -having count(*) in (select count(*) from src_cbo s1 where s1.key > '9' group by s1.key ) -; - --- non agg, non corr, windowing -select p_mfgr, p_name, avg(p_size) -from part -group by p_mfgr, p_name -having p_name in - (select first_value(p_name) over(partition by p_mfgr order by p_size) from part) -; - --- 18. SubQueries Not Exists --- distinct, corr -select * -from src_cbo b -where not exists - (select distinct a.key - from src_cbo a - where b.value = a.value and a.value > 'val_2' - ) -; - --- no agg, corr, having -select * -from src_cbo b -group by key, value -having not exists - (select a.key - from src_cbo a - where b.value = a.value and a.key = b.key and a.value > 'val_12' - ) -; - --- 19. 
SubQueries Exists --- view test -create view cv1 as -select * -from src_cbo b -where exists - (select a.key - from src_cbo a - where b.value = a.value and a.key = b.key and a.value > 'val_9') -; - -select * from cv1 -; - --- sq in from -select * -from (select * - from src_cbo b - where exists - (select a.key - from src_cbo a - where b.value = a.value and a.key = b.key and a.value > 'val_9') - ) a -; - --- sq in from, having -select * -from (select b.key, count(*) - from src_cbo b - group by b.key - having exists - (select a.key - from src_cbo a - where a.key = b.key and a.value > 'val_9' - ) -) a -; - --- 20. Test get stats with empty partition list -select t1.value from t1 join t2 on t1.key = t2.key where t1.dt = '10' and t1.c_boolean = true; - --- 21. Test groupby is empty and there is no other cols in aggr -select unionsrc.key FROM (select 'tst1' as key, count(1) as value from src) unionsrc; - -select unionsrc.key, unionsrc.value FROM (select 'tst1' as key, count(1) as value from src) unionsrc; - -select unionsrc.key FROM (select 'max' as key, max(c_int) as value from t3 s1 - UNION ALL - select 'min' as key, min(c_int) as value from t3 s2 - UNION ALL - select 'avg' as key, avg(c_int) as value from t3 s3) unionsrc order by unionsrc.key; - -select unionsrc.key, unionsrc.value FROM (select 'max' as key, max(c_int) as value from t3 s1 - UNION ALL - select 'min' as key, min(c_int) as value from t3 s2 - UNION ALL - select 'avg' as key, avg(c_int) as value from t3 s3) unionsrc order by unionsrc.key; - -select unionsrc.key, count(1) FROM (select 'max' as key, max(c_int) as value from t3 s1 - UNION ALL - select 'min' as key, min(c_int) as value from t3 s2 - UNION ALL - select 'avg' as key, avg(c_int) as value from t3 s3) unionsrc group by unionsrc.key order by unionsrc.key; - --- Windowing -select *, rank() over(partition by key order by value) as rr from src1; - -select *, rank() over(partition by key order by value) from src1; Index: ql/src/test/queries/clientpositive/cbo_gby.q =================================================================== --- ql/src/test/queries/clientpositive/cbo_gby.q (revision 0) +++ ql/src/test/queries/clientpositive/cbo_gby.q (working copy) @@ -0,0 +1,21 @@ +set hive.cbo.enable=true; +set hive.exec.check.crossproducts=false; + +set hive.stats.fetch.column.stats=true; +set hive.auto.convert.join=false; + +-- 6. 
Test Select + TS + Join + Fil + GB + GB Having +select * from cbo_t1 group by c_int; +select key, (c_int+1)+2 as x, sum(c_int) from cbo_t1 group by c_float, cbo_t1.c_int, key; +select * from (select key, (c_int+c_float+1+2) as x, sum(c_int) as y from cbo_t1 group by c_float, cbo_t1.c_int, key) R group by y, x; + +select * from (select key as a, c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0) group by c_float, cbo_t1.c_int, key order by a) cbo_t1 join (select key as p, c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0) group by c_float, cbo_t2.c_int, key order by q/10 desc, r asc) cbo_t2 on cbo_t1.a=p join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q >= 0) and (b > 0 or c_int >= 0) group by cbo_t3.c_int, c order by cbo_t3.c_int+c desc, c; + +select * from (select key as a, c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0) group by c_float, cbo_t1.c_int, key having cbo_t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by b % c asc, b desc) cbo_t1 left outer join (select key as p, c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0) group by c_float, cbo_t2.c_int, key having cbo_t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0) cbo_t2 on cbo_t1.a=p left outer join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q >= 0) and (b > 0 or c_int >= 0) group by cbo_t3.c_int, c having cbo_t3.c_int > 0 and (c_int >=1 or c >= 1) and (c_int + c) >= 0 order by cbo_t3.c_int % c asc, cbo_t3.c_int desc; + +select * from (select key as a, c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0) group by c_float, cbo_t1.c_int, key having cbo_t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by b+c, a desc) cbo_t1 right outer join (select key as p, c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0) group by c_float, cbo_t2.c_int, key having cbo_t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0) cbo_t2 on cbo_t1.a=p right outer join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q >= 2) and (b > 0 or c_int >= 0) group by cbo_t3.c_int, c; + +select * from (select key as a, c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0) group by c_float, cbo_t1.c_int, key having cbo_t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by c+a desc) cbo_t1 full outer join (select key as p, c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0) group by c_float, cbo_t2.c_int, key having cbo_t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by p+q desc, r asc) cbo_t2 on cbo_t1.a=p full outer join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q >= 0) and (b > 0 or c_int >= 0) group by cbo_t3.c_int, c having cbo_t3.c_int > 0 and (c_int >=1 or c >= 1) and (c_int + c) >= 0 order by cbo_t3.c_int; + +select * from (select key as a, c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0) group by c_float, cbo_t1.c_int, key having cbo_t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0) cbo_t1 
join (select key as p, c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0) group by c_float, cbo_t2.c_int, key having cbo_t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0) cbo_t2 on cbo_t1.a=p join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q >= 0) and (b > 0 or c_int >= 0) group by cbo_t3.c_int, c; + Index: ql/src/test/queries/clientpositive/cbo_gby_empty.q =================================================================== --- ql/src/test/queries/clientpositive/cbo_gby_empty.q (revision 0) +++ ql/src/test/queries/clientpositive/cbo_gby_empty.q (working copy) @@ -0,0 +1,29 @@ +set hive.cbo.enable=true; +set hive.exec.check.crossproducts=false; + +set hive.stats.fetch.column.stats=true; +set hive.auto.convert.join=false; + +-- 21. Test groupby is empty and there is no other cols in aggr +select unionsrc.key FROM (select 'tst1' as key, count(1) as value from src) unionsrc; + +select unionsrc.key, unionsrc.value FROM (select 'tst1' as key, count(1) as value from src) unionsrc; + +select unionsrc.key FROM (select 'max' as key, max(c_int) as value from cbo_t3 s1 + UNION ALL + select 'min' as key, min(c_int) as value from cbo_t3 s2 + UNION ALL + select 'avg' as key, avg(c_int) as value from cbo_t3 s3) unionsrc order by unionsrc.key; + +select unionsrc.key, unionsrc.value FROM (select 'max' as key, max(c_int) as value from cbo_t3 s1 + UNION ALL + select 'min' as key, min(c_int) as value from cbo_t3 s2 + UNION ALL + select 'avg' as key, avg(c_int) as value from cbo_t3 s3) unionsrc order by unionsrc.key; + +select unionsrc.key, count(1) FROM (select 'max' as key, max(c_int) as value from cbo_t3 s1 + UNION ALL + select 'min' as key, min(c_int) as value from cbo_t3 s2 + UNION ALL + select 'avg' as key, avg(c_int) as value from cbo_t3 s3) unionsrc group by unionsrc.key order by unionsrc.key; + Index: ql/src/test/queries/clientpositive/cbo_join.q =================================================================== --- ql/src/test/queries/clientpositive/cbo_join.q (revision 0) +++ ql/src/test/queries/clientpositive/cbo_join.q (working copy) @@ -0,0 +1,63 @@ +set hive.cbo.enable=true; +set hive.exec.check.crossproducts=false; + +set hive.stats.fetch.column.stats=true; +set hive.auto.convert.join=false; + +-- 4. 
Test Select + Join + TS +select cbo_t1.c_int, cbo_t2.c_int from cbo_t1 join cbo_t2 on cbo_t1.key=cbo_t2.key; +select cbo_t1.key from cbo_t1 join cbo_t3; +select cbo_t1.key from cbo_t1 join cbo_t3 where cbo_t1.key=cbo_t3.key and cbo_t1.key >= 1; +select cbo_t1.c_int, cbo_t2.c_int from cbo_t1 left outer join cbo_t2 on cbo_t1.key=cbo_t2.key; +select cbo_t1.c_int, cbo_t2.c_int from cbo_t1 right outer join cbo_t2 on cbo_t1.key=cbo_t2.key; +select cbo_t1.c_int, cbo_t2.c_int from cbo_t1 full outer join cbo_t2 on cbo_t1.key=cbo_t2.key; + +select b, cbo_t1.c, cbo_t2.p, q, cbo_t3.c_int from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1) cbo_t1 join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2) cbo_t2 on cbo_t1.a=p join cbo_t3 on cbo_t1.a=key; +select key, cbo_t1.c_int, cbo_t2.p, q from cbo_t1 join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2) cbo_t2 on cbo_t1.key=p join (select key as a, c_int as b, cbo_t3.c_float as c from cbo_t3)cbo_t3 on cbo_t1.key=a; +select a, cbo_t1.b, key, cbo_t2.c_int, cbo_t3.p from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1) cbo_t1 join cbo_t2 on cbo_t1.a=key join (select key as p, c_int as q, cbo_t3.c_float as r from cbo_t3)cbo_t3 on cbo_t1.a=cbo_t3.p; +select b, cbo_t1.c, cbo_t2.c_int, cbo_t3.c_int from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1) cbo_t1 join cbo_t2 on cbo_t1.a=cbo_t2.key join cbo_t3 on cbo_t1.a=cbo_t3.key; +select cbo_t3.c_int, b, cbo_t2.c_int, cbo_t1.c from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1) cbo_t1 join cbo_t2 on cbo_t1.a=cbo_t2.key join cbo_t3 on cbo_t1.a=cbo_t3.key; + +select b, cbo_t1.c, cbo_t2.p, q, cbo_t3.c_int from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1) cbo_t1 left outer join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2) cbo_t2 on cbo_t1.a=p join cbo_t3 on cbo_t1.a=key; +select key, cbo_t1.c_int, cbo_t2.p, q from cbo_t1 join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2) cbo_t2 on cbo_t1.key=p left outer join (select key as a, c_int as b, cbo_t3.c_float as c from cbo_t3)cbo_t3 on cbo_t1.key=a; + +select b, cbo_t1.c, cbo_t2.p, q, cbo_t3.c_int from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1) cbo_t1 right outer join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2) cbo_t2 on cbo_t1.a=p join cbo_t3 on cbo_t1.a=key; +select key, cbo_t1.c_int, cbo_t2.p, q from cbo_t1 join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2) cbo_t2 on cbo_t1.key=p right outer join (select key as a, c_int as b, cbo_t3.c_float as c from cbo_t3)cbo_t3 on cbo_t1.key=a; + +select b, cbo_t1.c, cbo_t2.p, q, cbo_t3.c_int from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1) cbo_t1 full outer join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2) cbo_t2 on cbo_t1.a=p join cbo_t3 on cbo_t1.a=key; +select key, cbo_t1.c_int, cbo_t2.p, q from cbo_t1 join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2) cbo_t2 on cbo_t1.key=p full outer join (select key as a, c_int as b, cbo_t3.c_float as c from cbo_t3)cbo_t3 on cbo_t1.key=a; + +-- 5. 
Test Select + Join + FIL + TS +select cbo_t1.c_int, cbo_t2.c_int from cbo_t1 join cbo_t2 on cbo_t1.key=cbo_t2.key where (cbo_t1.c_int + cbo_t2.c_int == 2) and (cbo_t1.c_int > 0 or cbo_t2.c_float >= 0); +select cbo_t1.c_int, cbo_t2.c_int from cbo_t1 left outer join cbo_t2 on cbo_t1.key=cbo_t2.key where (cbo_t1.c_int + cbo_t2.c_int == 2) and (cbo_t1.c_int > 0 or cbo_t2.c_float >= 0); +select cbo_t1.c_int, cbo_t2.c_int from cbo_t1 right outer join cbo_t2 on cbo_t1.key=cbo_t2.key where (cbo_t1.c_int + cbo_t2.c_int == 2) and (cbo_t1.c_int > 0 or cbo_t2.c_float >= 0); +select cbo_t1.c_int, cbo_t2.c_int from cbo_t1 full outer join cbo_t2 on cbo_t1.key=cbo_t2.key where (cbo_t1.c_int + cbo_t2.c_int == 2) and (cbo_t1.c_int > 0 or cbo_t2.c_float >= 0); + +select b, cbo_t1.c, cbo_t2.p, q, cbo_t3.c_int from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1 where (cbo_t1.c_int + 1 == 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)) cbo_t1 join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2 where (cbo_t2.c_int + 1 == 2) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)) cbo_t2 on cbo_t1.a=p join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q == 2) and (b > 0 or cbo_t2.q >= 0); + +select q, b, cbo_t2.p, cbo_t1.c, cbo_t3.c_int from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1 where (cbo_t1.c_int + 1 == 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)) cbo_t1 left outer join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2 where (cbo_t2.c_int + 1 == 2) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)) cbo_t2 on cbo_t1.a=p join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q == 2) and (b > 0 or c_int >= 0); + +select q, b, cbo_t2.p, cbo_t1.c, cbo_t3.c_int from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1 where (cbo_t1.c_int + 1 == 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)) cbo_t1 right outer join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2 where (cbo_t2.c_int + 1 == 2) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)) cbo_t2 on cbo_t1.a=p join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q == 2) and (b > 0 or c_int >= 0); + +select q, b, cbo_t2.p, cbo_t1.c, cbo_t3.c_int from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1 where (cbo_t1.c_int + 1 == 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)) cbo_t1 full outer join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2 where (cbo_t2.c_int + 1 == 2) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)) cbo_t2 on cbo_t1.a=p join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q == 2) and (b > 0 or c_int >= 0); + +select * from (select q, b, cbo_t2.p, cbo_t1.c, cbo_t3.c_int from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1 where (cbo_t1.c_int + 1 == 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)) cbo_t1 full outer join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2 where (cbo_t2.c_int + 1 == 2) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)) cbo_t2 on cbo_t1.a=p join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0); + +select * from (select q, b, cbo_t2.p, cbo_t1.c, cbo_t3.c_int from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1 where (cbo_t1.c_int + 1 == 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)) cbo_t1 left outer join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2 where (cbo_t2.c_int + 1 == 2) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)) cbo_t2 on cbo_t1.a=p left outer join cbo_t3 on 
cbo_t1.a=key where (b + cbo_t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0); + +select * from (select q, b, cbo_t2.p, cbo_t1.c, cbo_t3.c_int from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1 where (cbo_t1.c_int + 1 == 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)) cbo_t1 left outer join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2 where (cbo_t2.c_int + 1 == 2) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)) cbo_t2 on cbo_t1.a=p right outer join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0); + +select * from (select q, b, cbo_t2.p, cbo_t1.c, cbo_t3.c_int from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1 where (cbo_t1.c_int + 1 == 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)) cbo_t1 left outer join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2 where (cbo_t2.c_int + 1 == 2) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)) cbo_t2 on cbo_t1.a=p full outer join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0); + +select * from (select q, b, cbo_t2.p, cbo_t1.c, cbo_t3.c_int from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1 where (cbo_t1.c_int + 1 == 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)) cbo_t1 right outer join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2 where (cbo_t2.c_int + 1 == 2) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)) cbo_t2 on cbo_t1.a=p right outer join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0); + +select * from (select q, b, cbo_t2.p, cbo_t1.c, cbo_t3.c_int from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1 where (cbo_t1.c_int + 1 == 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)) cbo_t1 right outer join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2 where (cbo_t2.c_int + 1 == 2) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)) cbo_t2 on cbo_t1.a=p left outer join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0); + +select * from (select q, b, cbo_t2.p, cbo_t1.c, cbo_t3.c_int from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1 where (cbo_t1.c_int + 1 == 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)) cbo_t1 right outer join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2 where (cbo_t2.c_int + 1 == 2) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)) cbo_t2 on cbo_t1.a=p full outer join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0); + +select * from (select q, b, cbo_t2.p, cbo_t1.c, cbo_t3.c_int from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1 where (cbo_t1.c_int + 1 == 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)) cbo_t1 full outer join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2 where (cbo_t2.c_int + 1 == 2) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)) cbo_t2 on cbo_t1.a=p full outer join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0); + +select * from (select q, b, cbo_t2.p, cbo_t1.c, cbo_t3.c_int from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1 where (cbo_t1.c_int + 1 == 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float 
>= 0)) cbo_t1 full outer join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2 where (cbo_t2.c_int + 1 == 2) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)) cbo_t2 on cbo_t1.a=p left outer join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0); + +select * from (select q, b, cbo_t2.p, cbo_t1.c, cbo_t3.c_int from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1 where (cbo_t1.c_int + 1 == 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)) cbo_t1 full outer join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2 where (cbo_t2.c_int + 1 == 2) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)) cbo_t2 on cbo_t1.a=p right outer join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0); + Index: ql/src/test/queries/clientpositive/cbo_limit.q =================================================================== --- ql/src/test/queries/clientpositive/cbo_limit.q (revision 0) +++ ql/src/test/queries/clientpositive/cbo_limit.q (working copy) @@ -0,0 +1,16 @@ +set hive.cbo.enable=true; +set hive.exec.check.crossproducts=false; + +set hive.stats.fetch.column.stats=true; +set hive.auto.convert.join=false; + +-- 7. Test Select + TS + Join + Fil + GB + GB Having + Limit +select * from cbo_t1 group by c_int limit 1; +select key, (c_int+1)+2 as x, sum(c_int) from cbo_t1 group by c_float, cbo_t1.c_int, key order by x limit 1; +select * from (select key, (c_int+c_float+1+2) as x, sum(c_int) as y from cbo_t1 group by c_float, cbo_t1.c_int, key) R group by y, x order by x,y limit 1; +select key from(select key from (select key from cbo_t1 limit 5)cbo_t2 limit 5)cbo_t3 limit 5; +select key, c_int from(select key, c_int from (select key, c_int from cbo_t1 order by c_int limit 5)cbo_t1 order by c_int limit 5)cbo_t2 order by c_int limit 5; + +select * from (select key as a, c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0) group by c_float, cbo_t1.c_int, key order by a limit 5) cbo_t1 join (select key as p, c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0) group by c_float, cbo_t2.c_int, key order by q/10 desc, r asc limit 5) cbo_t2 on cbo_t1.a=p join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q >= 0) and (b > 0 or c_int >= 0) group by cbo_t3.c_int, c order by cbo_t3.c_int+c desc, c limit 5; + +select * from (select key as a, c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0) group by c_float, cbo_t1.c_int, key having cbo_t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by b % c asc, b desc limit 5) cbo_t1 left outer join (select key as p, c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0) group by c_float, cbo_t2.c_int, key having cbo_t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 limit 5) cbo_t2 on cbo_t1.a=p left outer join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q >= 0) and (b > 0 or c_int >= 0) group by cbo_t3.c_int, c having cbo_t3.c_int > 0 and (c_int >=1 or c >= 1) and (c_int + c) >= 0 order by cbo_t3.c_int % c asc, cbo_t3.c_int desc limit 5; Index: ql/src/test/queries/clientpositive/cbo_semijoin.q =================================================================== --- 
ql/src/test/queries/clientpositive/cbo_semijoin.q (revision 0) +++ ql/src/test/queries/clientpositive/cbo_semijoin.q (working copy) @@ -0,0 +1,16 @@ +set hive.cbo.enable=true; +set hive.exec.check.crossproducts=false; + +set hive.stats.fetch.column.stats=true; +set hive.auto.convert.join=false; + +-- 12. SemiJoin +select cbo_t1.c_int from cbo_t1 left semi join cbo_t2 on cbo_t1.key=cbo_t2.key; +select cbo_t1.c_int from cbo_t1 left semi join cbo_t2 on cbo_t1.key=cbo_t2.key where (cbo_t1.c_int + 1 == 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0); +select * from (select c, b, a from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1 where (cbo_t1.c_int + 1 == 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)) cbo_t1 left semi join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2 where (cbo_t2.c_int + 1 == 2) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)) cbo_t2 on cbo_t1.a=p left semi join cbo_t3 on cbo_t1.a=key where (b + 1 == 2) and (b > 0 or c >= 0)) R where (b + 1 = 2) and (R.b > 0 or c >= 0); +select * from (select cbo_t3.c_int, cbo_t1.c, b from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1 where (cbo_t1.c_int + 1 = 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)) cbo_t1 left semi join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2 where (cbo_t2.c_int + 1 == 2) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)) cbo_t2 on cbo_t1.a=p left outer join cbo_t3 on cbo_t1.a=key where (b + cbo_t3.c_int == 2) and (b > 0 or c_int >= 0)) R where (R.c_int + 1 = 2) and (R.b > 0 or c_int >= 0); +select * from (select c_int, b, cbo_t1.c from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1 where (cbo_t1.c_int + 1 == 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)) cbo_t1 left semi join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2 where (cbo_t2.c_int + 1 == 2) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)) cbo_t2 on cbo_t1.a=p right outer join cbo_t3 on cbo_t1.a=key where (b + 1 == 2) and (b > 0 or c_int >= 0)) R where (c + 1 = 2) and (R.b > 0 or c_int >= 0); +select * from (select c_int, b, cbo_t1.c from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1 where (cbo_t1.c_int + 1 == 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)) cbo_t1 left semi join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2 where (cbo_t2.c_int + 1 == 2) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)) cbo_t2 on cbo_t1.a=p full outer join cbo_t3 on cbo_t1.a=key where (b + 1 == 2) and (b > 0 or c_int >= 0)) R where (c + 1 = 2) and (R.b > 0 or c_int >= 0); +select * from (select key as a, c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0) group by c_float, cbo_t1.c_int, key having cbo_t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by a+b desc, c asc) cbo_t1 left semi join (select key as p, c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0) group by c_float, cbo_t2.c_int, key having cbo_t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by q+r/10 desc, p) cbo_t2 on cbo_t1.a=p left semi join cbo_t3 on cbo_t1.a=key where (b + 1 >= 0) and (b > 0 or a >= 0) group by a, c having a > 0 and (a >=1 or c >= 1) and (a + c) >= 0 order by c, a; +select * from (select key as a, c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0) group by c_float, 
cbo_t1.c_int, key having cbo_t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by a+b desc, c asc limit 5) cbo_t1 left semi join (select key as p, c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0) group by c_float, cbo_t2.c_int, key having cbo_t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by q+r/10 desc, p limit 5) cbo_t2 on cbo_t1.a=p left semi join cbo_t3 on cbo_t1.a=key where (b + 1 >= 0) and (b > 0 or a >= 0) group by a, c having a > 0 and (a >=1 or c >= 1) and (a + c) >= 0 order by c, a; + Index: ql/src/test/queries/clientpositive/cbo_simple_select.q =================================================================== --- ql/src/test/queries/clientpositive/cbo_simple_select.q (revision 0) +++ ql/src/test/queries/clientpositive/cbo_simple_select.q (working copy) @@ -0,0 +1,54 @@ +set hive.cbo.enable=true; +set hive.exec.check.crossproducts=false; + +set hive.stats.fetch.column.stats=true; +set hive.auto.convert.join=false; + +-- 1. Test Select + TS +select * from cbo_t1; +select * from cbo_t1 as cbo_t1; +select * from cbo_t1 as cbo_t2; + +select cbo_t1.key as x, c_int as c_int, (((c_int+c_float)*10)+5) as y from cbo_t1; + +-- 2. Test Select + TS + FIL +select * from cbo_t1 where cbo_t1.c_int >= 0; +select * from cbo_t1 as cbo_t1 where cbo_t1.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100; +select * from cbo_t1 as cbo_t2 where cbo_t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100; + +select cbo_t2.key as x, c_int as c_int, (((c_int+c_float)*10)+5) as y from cbo_t1 as cbo_t2 where cbo_t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100; + +-- 3 Test Select + Select + TS + FIL +select * from (select * from cbo_t1 where cbo_t1.c_int >= 0) as cbo_t1; +select * from (select * from cbo_t1 as cbo_t1 where cbo_t1.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as cbo_t1; +select * from (select * from cbo_t1 as cbo_t2 where cbo_t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as cbo_t1; +select * from (select cbo_t2.key as x, c_int as c_int, (((c_int+c_float)*10)+5) as y from cbo_t1 as cbo_t2 where cbo_t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as cbo_t1; + +select * from (select * from cbo_t1 where cbo_t1.c_int >= 0) as cbo_t1 where cbo_t1.c_int >= 0; +select * from (select * from cbo_t1 as cbo_t1 where cbo_t1.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as cbo_t1 where cbo_t1.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100; +select * from (select * from cbo_t1 as cbo_t2 where cbo_t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as cbo_t2 where cbo_t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100; +select * from (select cbo_t2.key as x, c_int as c_int, (((c_int+c_float)*10)+5) as y from cbo_t1 as cbo_t2 where cbo_t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as cbo_t1 where cbo_t1.c_int >= 0 and y+c_int >= 0 or x <= 100; + +select cbo_t1.c_int+c_float as x , c_int as c_int, (((c_int+c_float)*10)+5) as y from (select * from cbo_t1 where cbo_t1.c_int >= 0) as cbo_t1 where cbo_t1.c_int >= 0; +select cbo_t2.c_int+c_float as x , c_int as c_int, (((c_int+c_float)*10)+5) as y from (select * from cbo_t1 where cbo_t1.c_int >= 0) as cbo_t2 where cbo_t2.c_int >= 0; + + + +select * from (select * from cbo_t1 where cbo_t1.c_int >= 0) as cbo_t1 where cbo_t1.c_int >= 0; +select * from (select * from cbo_t1 as cbo_t1 where cbo_t1.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as cbo_t1 where cbo_t1.c_int 
>= 0 and c_float+c_int >= 0 or c_float <= 100; +select * from (select * from cbo_t1 as cbo_t2 where cbo_t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as cbo_t2 where cbo_t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100; +select * from (select cbo_t2.key as x, c_int as c_int, (((c_int+c_float)*10)+5) as y from cbo_t1 as cbo_t2 where cbo_t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as cbo_t1 where cbo_t1.c_int >= 0 and y+c_int >= 0 or x <= 100; + +select cbo_t1.c_int+c_float as x , c_int as c_int, (((c_int+c_float)*10)+5) as y from (select * from cbo_t1 where cbo_t1.c_int >= 0) as cbo_t1 where cbo_t1.c_int >= 0; +select cbo_t2.c_int+c_float as x , c_int as c_int, (((c_int+c_float)*10)+5) as y from (select * from cbo_t1 where cbo_t1.c_int >= 0) as cbo_t2 where cbo_t2.c_int >= 0; + + + +-- 13. null expr in select list +select null from cbo_t3; + +-- 14. unary operator +select key from cbo_t1 where c_int = -6 or c_int = +6; + +-- 15. query referencing only partition columns +select count(cbo_t1.dt) from cbo_t1 join cbo_t2 on cbo_t1.dt = cbo_t2.dt where cbo_t1.dt = '2014' ; Index: ql/src/test/queries/clientpositive/cbo_stats.q =================================================================== --- ql/src/test/queries/clientpositive/cbo_stats.q (revision 0) +++ ql/src/test/queries/clientpositive/cbo_stats.q (working copy) @@ -0,0 +1,9 @@ +set hive.cbo.enable=true; +set hive.exec.check.crossproducts=false; + +set hive.stats.fetch.column.stats=true; +set hive.auto.convert.join=false; + +-- 20. Test get stats with empty partition list +select cbo_t1.value from cbo_t1 join cbo_t2 on cbo_t1.key = cbo_t2.key where cbo_t1.dt = '10' and cbo_t1.c_boolean = true; + Index: ql/src/test/queries/clientpositive/cbo_subq_exists.q =================================================================== --- ql/src/test/queries/clientpositive/cbo_subq_exists.q (revision 0) +++ ql/src/test/queries/clientpositive/cbo_subq_exists.q (working copy) @@ -0,0 +1,66 @@ +set hive.cbo.enable=true; +set hive.exec.check.crossproducts=false; + +set hive.stats.fetch.column.stats=true; +set hive.auto.convert.join=false; + +-- 18. SubQueries Not Exists +-- distinct, corr +select * +from src_cbo b +where not exists + (select distinct a.key + from src_cbo a + where b.value = a.value and a.value > 'val_2' + ) +; + +-- no agg, corr, having +select * +from src_cbo b +group by key, value +having not exists + (select a.key + from src_cbo a + where b.value = a.value and a.key = b.key and a.value > 'val_12' + ) +; + +-- 19. 
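The correlated NOT EXISTS tests in section 18 above exercise anti-join semantics: keep the rows of b for which the subquery finds no match. A hand-written equivalent of the first query, shown only to make the expected result set explicit (not necessarily the plan Hive generates):

select b.*
from src_cbo b
left outer join (select distinct value from src_cbo where value > 'val_2') a
on b.value = a.value
where a.value is null;

Section 19, which follows, covers the positive EXISTS cases.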
SubQueries Exists +-- view test +create view cv1 as +select * +from src_cbo b +where exists + (select a.key + from src_cbo a + where b.value = a.value and a.key = b.key and a.value > 'val_9') +; + +select * from cv1 +; + +-- sq in from +select * +from (select * + from src_cbo b + where exists + (select a.key + from src_cbo a + where b.value = a.value and a.key = b.key and a.value > 'val_9') + ) a +; + +-- sq in from, having +select * +from (select b.key, count(*) + from src_cbo b + group by b.key + having exists + (select a.key + from src_cbo a + where a.key = b.key and a.value > 'val_9' + ) +) a +; + Index: ql/src/test/queries/clientpositive/cbo_subq_in.q =================================================================== --- ql/src/test/queries/clientpositive/cbo_subq_in.q (revision 0) +++ ql/src/test/queries/clientpositive/cbo_subq_in.q (working copy) @@ -0,0 +1,54 @@ +set hive.cbo.enable=true; +set hive.exec.check.crossproducts=false; + +set hive.stats.fetch.column.stats=true; +set hive.auto.convert.join=false; + +-- 17. SubQueries In +-- non agg, non corr +select * +from src_cbo +where src_cbo.key in (select key from src_cbo s1 where s1.key > '9') +; + +-- agg, corr +-- add back once rank issue fixed for cbo + +-- distinct, corr +select * +from src_cbo b +where b.key in + (select distinct a.key + from src_cbo a + where b.value = a.value and a.key > '9' + ) +; + +-- non agg, corr, with join in Parent Query +select p.p_partkey, li.l_suppkey +from (select distinct l_partkey as p_partkey from lineitem) p join lineitem li on p.p_partkey = li.l_partkey +where li.l_linenumber = 1 and + li.l_orderkey in (select l_orderkey from lineitem where l_shipmode = 'AIR' and l_linenumber = li.l_linenumber) +; + +-- where and having +-- Plan is: +-- Stage 1: b semijoin sq1:src_cbo (subquery in where) +-- Stage 2: group by Stage 1 o/p +-- Stage 5: group by on sq2:src_cbo (subquery in having) +-- Stage 6: Stage 2 o/p semijoin Stage 5 +select key, value, count(*) +from src_cbo b +where b.key in (select key from src_cbo where src_cbo.key > '8') +group by key, value +having count(*) in (select count(*) from src_cbo s1 where s1.key > '9' group by s1.key ) +; + +-- non agg, non corr, windowing +select p_mfgr, p_name, avg(p_size) +from part +group by p_mfgr, p_name +having p_name in + (select first_value(p_name) over(partition by p_mfgr order by p_size) from part) +; + Index: ql/src/test/queries/clientpositive/cbo_subq_not_in.q =================================================================== --- ql/src/test/queries/clientpositive/cbo_subq_not_in.q (revision 0) +++ ql/src/test/queries/clientpositive/cbo_subq_not_in.q (working copy) @@ -0,0 +1,80 @@ +set hive.cbo.enable=true; +set hive.exec.check.crossproducts=false; + +set hive.stats.fetch.column.stats=true; +set hive.auto.convert.join=false; + +-- 16. 
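The plan sketch in the comments above states the strategy outright: an IN predicate over a key becomes a semijoin against the subquery. The first, uncorrelated test in this file should therefore behave like the explicit LEFT SEMI JOIN form used in cbo_semijoin.q; a semijoin emits each left row at most once when a match exists, and only left-side columns are selectable:

select s.*
from src_cbo s
left semi join (select key from src_cbo where key > '9') s1
on s.key = s1.key;

Section 16, which follows, inverts this with NOT IN.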
SubQueries Not In +-- non agg, non corr +select * +from src_cbo +where src_cbo.key not in + ( select key from src_cbo s1 + where s1.key > '2' + ) order by key +; + +-- non agg, corr +select p_mfgr, b.p_name, p_size +from part b +where b.p_name not in + (select p_name + from (select p_mfgr, p_name, p_size as r from part) a + where r < 10 and b.p_mfgr = a.p_mfgr + ) +; + +-- agg, non corr +select p_name, p_size +from +part where part.p_size not in + (select avg(p_size) + from (select p_size from part) a + where p_size < 10 + ) order by p_name +; + +-- agg, corr +select p_mfgr, p_name, p_size +from part b where b.p_size not in + (select min(p_size) + from (select p_mfgr, p_size from part) a + where p_size < 10 and b.p_mfgr = a.p_mfgr + ) order by p_name +; + +-- non agg, non corr, Group By in Parent Query +select li.l_partkey, count(*) +from lineitem li +where li.l_linenumber = 1 and + li.l_orderkey not in (select l_orderkey from lineitem where l_shipmode = 'AIR') +group by li.l_partkey +; + +-- add null check test from sq_notin.q once HIVE-7721 resolved. + +-- non agg, corr, having +select b.p_mfgr, min(p_retailprice) +from part b +group by b.p_mfgr +having b.p_mfgr not in + (select p_mfgr + from (select p_mfgr, min(p_retailprice) l, max(p_retailprice) r, avg(p_retailprice) a from part group by p_mfgr) a + where min(p_retailprice) = l and r - l > 600 + ) + order by b.p_mfgr +; + +-- agg, non corr, having +select b.p_mfgr, min(p_retailprice) +from part b +group by b.p_mfgr +having b.p_mfgr not in + (select p_mfgr + from part a + group by p_mfgr + having max(p_retailprice) - min(p_retailprice) > 600 + ) + order by b.p_mfgr +; + Index: ql/src/test/queries/clientpositive/cbo_udf_udaf.q =================================================================== --- ql/src/test/queries/clientpositive/cbo_udf_udaf.q (revision 0) +++ ql/src/test/queries/clientpositive/cbo_udf_udaf.q (working copy) @@ -0,0 +1,17 @@ +set hive.cbo.enable=true; +set hive.exec.check.crossproducts=false; + +set hive.stats.fetch.column.stats=true; +set hive.auto.convert.join=false; + +-- 8. 
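The comment above defers a null-check test to HIVE-7721. The caveat behind it is standard three-valued logic: NOT IN compares the outer value against every value the subquery produces, so if the subquery can yield a NULL, the predicate never evaluates to true and the outer query silently returns zero rows. As an illustration, assuming the subquery column were nullable:

-- would return no rows at all if any s1.key in the subquery result were NULL
select * from src_cbo
where src_cbo.key not in (select key from src_cbo s1 where s1.key > '2');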
Test UDF/UDAF +select count(*), count(c_int), sum(c_int), avg(c_int), max(c_int), min(c_int) from cbo_t1; +select count(*), count(c_int), sum(c_int), avg(c_int), max(c_int), min(c_int), case c_int when 0 then 1 when 1 then 2 else 3 end, sum(case c_int when 0 then 1 when 1 then 2 else 3 end) from cbo_t1 group by c_int; +select * from (select count(*) as a, count(distinct c_int) as b, sum(c_int) as c, avg(c_int) as d, max(c_int) as e, min(c_int) as f from cbo_t1) cbo_t1; +select * from (select count(*) as a, count(distinct c_int) as b, sum(c_int) as c, avg(c_int) as d, max(c_int) as e, min(c_int) as f, case c_int when 0 then 1 when 1 then 2 else 3 end as g, sum(case c_int when 0 then 1 when 1 then 2 else 3 end) as h from cbo_t1 group by c_int) cbo_t1; +select f,a,e,b from (select count(*) as a, count(c_int) as b, sum(c_int) as c, avg(c_int) as d, max(c_int) as e, min(c_int) as f from cbo_t1) cbo_t1; +select f,a,e,b from (select count(*) as a, count(distinct c_int) as b, sum(distinct c_int) as c, avg(distinct c_int) as d, max(distinct c_int) as e, min(distinct c_int) as f from cbo_t1) cbo_t1; +select count(c_int) as a, avg(c_float), key from cbo_t1 group by key; +select count(distinct c_int) as a, avg(c_float) from cbo_t1 group by c_float; +select count(distinct c_int) as a, avg(c_float) from cbo_t1 group by c_int; +select count(distinct c_int) as a, avg(c_float) from cbo_t1 group by c_float, c_int; Index: ql/src/test/queries/clientpositive/cbo_union.q =================================================================== --- ql/src/test/queries/clientpositive/cbo_union.q (revision 0) +++ ql/src/test/queries/clientpositive/cbo_union.q (working copy) @@ -0,0 +1,11 @@ +set hive.cbo.enable=true; +set hive.exec.check.crossproducts=false; + +set hive.stats.fetch.column.stats=true; +set hive.auto.convert.join=false; + +-- 11. Union All +select * from cbo_t1 order by key, c_boolean, value, dt union all select * from cbo_t2 order by key, c_boolean, value, dt; +select key from (select key, c_int from (select * from cbo_t1 union all select * from cbo_t2 where cbo_t2.key >=0)r1 union all select key, c_int from cbo_t3)r2 where key >=0 order by key; +select r2.key from (select key, c_int from (select key, c_int from cbo_t1 union all select key, c_int from cbo_t3 )r1 union all select key, c_int from cbo_t3)r2 join (select key, c_int from (select * from cbo_t1 union all select * from cbo_t2 where cbo_t2.key >=0)r1 union all select key, c_int from cbo_t3)r3 on r2.key=r3.key where r3.key >=0 order by r2.key; + Index: ql/src/test/queries/clientpositive/cbo_views.q =================================================================== --- ql/src/test/queries/clientpositive/cbo_views.q (revision 0) +++ ql/src/test/queries/clientpositive/cbo_views.q (working copy) @@ -0,0 +1,45 @@ +set hive.cbo.enable=true; +set hive.exec.check.crossproducts=false; + +set hive.stats.fetch.column.stats=true; +set hive.auto.convert.join=false; + +-- 10. 
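One distinction the UDF/UDAF block above is set up to exercise: count(*), count(c_int), and count(distinct c_int) are three different aggregates. count(*) counts rows, count(c_int) skips rows where c_int is NULL, and count(distinct c_int) additionally collapses duplicate values, so over c_int values (1, 1, NULL) they return 3, 2, and 1 respectively:

select count(*), count(c_int), count(distinct c_int) from cbo_t1;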
Test views +create view v1 as select c_int, value, c_boolean, dt from cbo_t1; +create view v2 as select c_int, value from cbo_t2; + +select value from v1 where c_boolean=false; +select max(c_int) from v1 group by (c_boolean); + +select count(v1.c_int) from v1 join cbo_t2 on v1.c_int = cbo_t2.c_int; +select count(v1.c_int) from v1 join v2 on v1.c_int = v2.c_int; + +select count(*) from v1 a join v1 b on a.value = b.value; + +create view v3 as select v1.value val from v1 join cbo_t1 on v1.c_boolean = cbo_t1.c_boolean; + +select count(val) from v3 where val != '1'; +with q1 as ( select key from cbo_t1 where key = '1') +select count(*) from q1; + +with q1 as ( select value from v1 where c_boolean = false) +select count(value) from q1 ; + +create view v4 as +with q1 as ( select key,c_int from cbo_t1 where key = '1') +select * from q1 +; + +with q1 as ( select c_int from q2 where c_boolean = false), +q2 as ( select c_int,c_boolean from v1 where value = '1') +select sum(c_int) from (select c_int from q1) a; + +with q1 as ( select cbo_t1.c_int c_int from q2 join cbo_t1 where q2.c_int = cbo_t1.c_int and cbo_t1.dt='2014'), +q2 as ( select c_int,c_boolean from v1 where value = '1' or dt = '14') +select count(*) from q1 join q2 join v4 on q1.c_int = q2.c_int and v4.c_int = q2.c_int; + + +drop view v1; +drop view v2; +drop view v3; +drop view v4; Index: ql/src/test/queries/clientpositive/cbo_windowing.q =================================================================== --- ql/src/test/queries/clientpositive/cbo_windowing.q (revision 0) +++ ql/src/test/queries/clientpositive/cbo_windowing.q (working copy) @@ -0,0 +1,18 @@ +set hive.cbo.enable=true; +set hive.exec.check.crossproducts=false; + +set hive.stats.fetch.column.stats=true; +set hive.auto.convert.join=false; + +-- 9. 
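The windowing tests that follow in section 9 run the same aggregates under both ROWS and RANGE frames, which is the point of interest: ROWS offsets count physical rows before and after the current row, while RANGE offsets are interpreted against the ORDER BY value, so rows that tie on the ordering value fall into the same RANGE frame but not necessarily the same ROWS frame. Side by side, in the frame syntax the tests use:

select key, value,
sum(c_int) over (partition by key order by value rows between 1 preceding and current row) as sum_rows,
sum(c_int) over (partition by key order by value range between 1 preceding and current row) as sum_range
from cbo_t1;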
Test Windowing Functions +select count(c_int) over() from cbo_t1; +select count(c_int) over(), sum(c_float) over(), max(c_int) over(), min(c_int) over(), row_number() over(), rank() over(), dense_rank() over(), percent_rank() over(), lead(c_int, 2, c_int) over(), lag(c_float, 2, c_float) over() from cbo_t1; +select * from (select count(c_int) over(), sum(c_float) over(), max(c_int) over(), min(c_int) over(), row_number() over(), rank() over(), dense_rank() over(), percent_rank() over(), lead(c_int, 2, c_int) over(), lag(c_float, 2, c_float) over() from cbo_t1) cbo_t1; +select x from (select count(c_int) over() as x, sum(c_float) over() from cbo_t1) cbo_t1; +select 1+sum(c_int) over() from cbo_t1; +select sum(c_int)+sum(sum(c_int)) over() from cbo_t1; +select * from (select max(c_int) over (partition by key order by value Rows UNBOUNDED PRECEDING), min(c_int) over (partition by key order by value rows current row), count(c_int) over(partition by key order by value ROWS 1 PRECEDING), avg(value) over (partition by key order by value Rows between unbounded preceding and unbounded following), sum(value) over (partition by key order by value rows between unbounded preceding and current row), avg(c_float) over (partition by key order by value Rows between 1 preceding and unbounded following), sum(c_float) over (partition by key order by value rows between 1 preceding and current row), max(c_float) over (partition by key order by value rows between 1 preceding and unbounded following), min(c_float) over (partition by key order by value rows between 1 preceding and 1 following) from cbo_t1) cbo_t1; +select i, a, h, b, c, d, e, f, g, a as x, a +1 as y from (select max(c_int) over (partition by key order by value range UNBOUNDED PRECEDING) a, min(c_int) over (partition by key order by value range current row) b, count(c_int) over(partition by key order by value range 1 PRECEDING) c, avg(value) over (partition by key order by value range between unbounded preceding and unbounded following) d, sum(value) over (partition by key order by value range between unbounded preceding and current row) e, avg(c_float) over (partition by key order by value range between 1 preceding and unbounded following) f, sum(c_float) over (partition by key order by value range between 1 preceding and current row) g, max(c_float) over (partition by key order by value range between 1 preceding and unbounded following) h, min(c_float) over (partition by key order by value range between 1 preceding and 1 following) i from cbo_t1) cbo_t1; +select *, rank() over(partition by key order by value) as rr from src1; +select *, rank() over(partition by key order by value) from src1; + Index: ql/src/test/queries/clientpositive/create_like.q =================================================================== --- ql/src/test/queries/clientpositive/create_like.q (revision 1637277) +++ ql/src/test/queries/clientpositive/create_like.q (working copy) @@ -21,9 +21,11 @@ SELECT * FROM table1; SELECT * FROM table2; -CREATE EXTERNAL TABLE table4 (a INT) LOCATION '${system:hive.root}/data/files/ext_test'; -CREATE EXTERNAL TABLE table5 LIKE table4 LOCATION '${system:hive.root}/data/files/ext_test'; +dfs -cp ${system:hive.root}/data/files/ext_test ${system:test.tmp.dir}/ext_test; +CREATE EXTERNAL TABLE table4 (a INT) LOCATION '${system:test.tmp.dir}/ext_test'; +CREATE EXTERNAL TABLE table5 LIKE table4 LOCATION '${system:test.tmp.dir}/ext_test'; + SELECT * FROM table4; SELECT * FROM table5; @@ -31,7 +33,7 @@ SELECT * FROM table4; DROP TABLE table4; 
-CREATE EXTERNAL TABLE table4 (a INT) LOCATION '${system:hive.root}/data/files/ext_test'; +CREATE EXTERNAL TABLE table4 (a INT) LOCATION '${system:test.tmp.dir}/ext_test'; SELECT * FROM table4; CREATE TABLE doctors STORED AS AVRO TBLPROPERTIES ('avro.schema.literal'='{ Index: ql/src/test/queries/clientpositive/dynpart_sort_optimization_acid.q =================================================================== --- ql/src/test/queries/clientpositive/dynpart_sort_optimization_acid.q (revision 0) +++ ql/src/test/queries/clientpositive/dynpart_sort_optimization_acid.q (working copy) @@ -0,0 +1,117 @@ +set hive.support.concurrency=true; +set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; +set hive.enforce.bucketing=true; +set hive.exec.dynamic.partition.mode=nonstrict; + +set hive.optimize.sort.dynamic.partition=false; + +-- single level partition, sorted dynamic partition disabled +drop table acid; +CREATE TABLE acid(key string, value string) PARTITIONED BY(ds string) CLUSTERED BY(key) INTO 2 BUCKETS STORED AS ORC TBLPROPERTIES ('transactional'='true'); +insert into table acid partition(ds) select key,value,ds from srcpart; +select count(*) from acid where ds='2008-04-08'; + +insert into table acid partition(ds='2008-04-08') values("foo", "bar"); +select count(*) from acid where ds='2008-04-08'; + +explain update acid set key = 'foo' where value = 'bar' and ds='2008-04-08'; +update acid set key = 'foo' where value = 'bar' and ds='2008-04-08'; +select count(*) from acid where ds='2008-04-08'; + +explain update acid set key = 'foo' where value = 'bar' and ds in ('2008-04-08'); +update acid set key = 'foo' where value = 'bar' and ds in ('2008-04-08'); +select count(*) from acid where ds in ('2008-04-08'); + +delete from acid where key = 'foo' and ds='2008-04-08'; +select count(*) from acid where ds='2008-04-08'; + +set hive.optimize.sort.dynamic.partition=true; + +-- single level partition, sorted dynamic partition enabled +drop table acid; +CREATE TABLE acid(key string, value string) PARTITIONED BY(ds string) CLUSTERED BY(key) INTO 2 BUCKETS STORED AS ORC TBLPROPERTIES ('transactional'='true'); +insert into table acid partition(ds) select key,value,ds from srcpart; +select count(*) from acid where ds='2008-04-08'; + +insert into table acid partition(ds='2008-04-08') values("foo", "bar"); +select count(*) from acid where ds='2008-04-08'; + +explain update acid set key = 'foo' where value = 'bar' and ds='2008-04-08'; +update acid set key = 'foo' where value = 'bar' and ds='2008-04-08'; +select count(*) from acid where ds='2008-04-08'; + +explain update acid set key = 'foo' where value = 'bar' and ds in ('2008-04-08'); +update acid set key = 'foo' where value = 'bar' and ds in ('2008-04-08'); +select count(*) from acid where ds in ('2008-04-08'); + +delete from acid where key = 'foo' and ds='2008-04-08'; +select count(*) from acid where ds='2008-04-08'; + +set hive.optimize.sort.dynamic.partition=false; + +-- 2 level partition, sorted dynamic partition disabled +drop table acid; +CREATE TABLE acid(key string, value string) PARTITIONED BY(ds string, hr int) CLUSTERED BY(key) INTO 2 BUCKETS STORED AS ORC TBLPROPERTIES ('transactional'='true'); +insert into table acid partition(ds,hr) select * from srcpart; +select count(*) from acid where ds='2008-04-08' and hr=11; + +insert into table acid partition(ds='2008-04-08',hr=11) values("foo", "bar"); +select count(*) from acid where ds='2008-04-08' and hr=11; + +explain update acid set key = 'foo' where value = 'bar' and ds='2008-04-08' 
and hr=11; +update acid set key = 'foo' where value = 'bar' and ds='2008-04-08' and hr=11; +select count(*) from acid where ds='2008-04-08' and hr=11; + +explain update acid set key = 'foo' where value = 'bar' and ds='2008-04-08' and hr>=11; +update acid set key = 'foo' where value = 'bar' and ds='2008-04-08' and hr>=11; +select count(*) from acid where ds='2008-04-08' and hr>=11; + +delete from acid where key = 'foo' and ds='2008-04-08' and hr=11; +select count(*) from acid where ds='2008-04-08' and hr=11; + +set hive.optimize.sort.dynamic.partition=true; + +-- 2 level partition, sorted dynamic partition enabled +drop table acid; +CREATE TABLE acid(key string, value string) PARTITIONED BY(ds string, hr int) CLUSTERED BY(key) INTO 2 BUCKETS STORED AS ORC TBLPROPERTIES ('transactional'='true'); +insert into table acid partition(ds,hr) select * from srcpart; +select count(*) from acid where ds='2008-04-08' and hr=11; + +insert into table acid partition(ds='2008-04-08',hr=11) values("foo", "bar"); +select count(*) from acid where ds='2008-04-08' and hr=11; + +explain update acid set key = 'foo' where value = 'bar' and ds='2008-04-08' and hr=11; +update acid set key = 'foo' where value = 'bar' and ds='2008-04-08' and hr=11; +select count(*) from acid where ds='2008-04-08' and hr=11; + +explain update acid set key = 'foo' where value = 'bar' and ds='2008-04-08' and hr>=11; +update acid set key = 'foo' where value = 'bar' and ds='2008-04-08' and hr>=11; +select count(*) from acid where ds='2008-04-08' and hr>=11; + +delete from acid where key = 'foo' and ds='2008-04-08' and hr=11; +select count(*) from acid where ds='2008-04-08' and hr=11; + +set hive.optimize.sort.dynamic.partition=true; +set hive.optimize.constant.propagation=false; + +-- 2 level partition, sorted dynamic partition enabled, constant propagation disabled +drop table acid; +CREATE TABLE acid(key string, value string) PARTITIONED BY(ds string, hr int) CLUSTERED BY(key) INTO 2 BUCKETS STORED AS ORC TBLPROPERTIES ('transactional'='true'); +insert into table acid partition(ds,hr) select * from srcpart; +select count(*) from acid where ds='2008-04-08' and hr=11; + +insert into table acid partition(ds='2008-04-08',hr=11) values("foo", "bar"); +select count(*) from acid where ds='2008-04-08' and hr=11; + +explain update acid set key = 'foo' where value = 'bar' and ds='2008-04-08' and hr=11; +update acid set key = 'foo' where value = 'bar' and ds='2008-04-08' and hr=11; +select count(*) from acid where ds='2008-04-08' and hr=11; + +explain update acid set key = 'foo' where value = 'bar' and ds='2008-04-08' and hr>=11; +update acid set key = 'foo' where value = 'bar' and ds='2008-04-08' and hr>=11; +select count(*) from acid where ds='2008-04-08' and hr>=11; + +delete from acid where key = 'foo' and ds='2008-04-08' and hr=11; +select count(*) from acid where ds='2008-04-08' and hr=11; + +set hive.optimize.sort.dynamic.partition=true; Index: ql/src/test/queries/clientpositive/join_alt_syntax.q =================================================================== --- ql/src/test/queries/clientpositive/join_alt_syntax.q (revision 1637277) +++ ql/src/test/queries/clientpositive/join_alt_syntax.q (working copy) @@ -1,20 +1,3 @@ -DROP TABLE part; - --- data setup -CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -); - -LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part; - explain 
select p1.p_name, p2.p_name from part p1 , part p2; Index: ql/src/test/queries/clientpositive/join_cond_pushdown_1.q =================================================================== --- ql/src/test/queries/clientpositive/join_cond_pushdown_1.q (revision 1637277) +++ ql/src/test/queries/clientpositive/join_cond_pushdown_1.q (working copy) @@ -1,22 +1,3 @@ -DROP TABLE part; - --- data setup -CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -); - -LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part; - - - explain select * from part p1 join part p2 join part p3 on p1.p_name = p2.p_name and p2.p_name = p3.p_name; Index: ql/src/test/queries/clientpositive/join_cond_pushdown_2.q =================================================================== --- ql/src/test/queries/clientpositive/join_cond_pushdown_2.q (revision 1637277) +++ ql/src/test/queries/clientpositive/join_cond_pushdown_2.q (working copy) @@ -1,21 +1,3 @@ -DROP TABLE part; - --- data setup -CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -); - -LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part; - - explain select * from part p1 join part p2 join part p3 on p1.p_name = p2.p_name join part p4 on p2.p_name = p3.p_name and p1.p_name = p4.p_name; Index: ql/src/test/queries/clientpositive/join_cond_pushdown_3.q =================================================================== --- ql/src/test/queries/clientpositive/join_cond_pushdown_3.q (revision 1637277) +++ ql/src/test/queries/clientpositive/join_cond_pushdown_3.q (working copy) @@ -1,22 +1,3 @@ -DROP TABLE part; - --- data setup -CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -); - -LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part; - - - explain select * from part p1 join part p2 join part p3 where p1.p_name = p2.p_name and p2.p_name = p3.p_name; Index: ql/src/test/queries/clientpositive/join_cond_pushdown_4.q =================================================================== --- ql/src/test/queries/clientpositive/join_cond_pushdown_4.q (revision 1637277) +++ ql/src/test/queries/clientpositive/join_cond_pushdown_4.q (working copy) @@ -1,21 +1,3 @@ -DROP TABLE part; - --- data setup -CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -); - -LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part; - - explain select * from part p1 join part p2 join part p3 on p1.p_name = p2.p_name join part p4 where p2.p_name = p3.p_name and p1.p_name = p4.p_name; Index: ql/src/test/queries/clientpositive/join_cond_pushdown_unqual1.q =================================================================== --- ql/src/test/queries/clientpositive/join_cond_pushdown_unqual1.q (revision 1637277) +++ ql/src/test/queries/clientpositive/join_cond_pushdown_unqual1.q (working copy) @@ -1,20 +1,3 @@ -DROP TABLE part; - --- data setup -CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container 
STRING, - p_retailprice DOUBLE, - p_comment STRING -); - -LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part; - create table part2( p2_partkey INT, p2_name STRING, Index: ql/src/test/queries/clientpositive/join_cond_pushdown_unqual2.q =================================================================== --- ql/src/test/queries/clientpositive/join_cond_pushdown_unqual2.q (revision 1637277) +++ ql/src/test/queries/clientpositive/join_cond_pushdown_unqual2.q (working copy) @@ -1,20 +1,3 @@ -DROP TABLE part; - --- data setup -CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -); - -LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part; - create table part2( p2_partkey INT, p2_name STRING, Index: ql/src/test/queries/clientpositive/join_cond_pushdown_unqual3.q =================================================================== --- ql/src/test/queries/clientpositive/join_cond_pushdown_unqual3.q (revision 1637277) +++ ql/src/test/queries/clientpositive/join_cond_pushdown_unqual3.q (working copy) @@ -1,20 +1,3 @@ -DROP TABLE part; - --- data setup -CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -); - -LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part; - create table part2( p2_partkey INT, p2_name STRING, Index: ql/src/test/queries/clientpositive/join_cond_pushdown_unqual4.q =================================================================== --- ql/src/test/queries/clientpositive/join_cond_pushdown_unqual4.q (revision 1637277) +++ ql/src/test/queries/clientpositive/join_cond_pushdown_unqual4.q (working copy) @@ -1,20 +1,3 @@ -DROP TABLE part; - --- data setup -CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -); - -LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part; - create table part2( p2_partkey INT, p2_name STRING, Index: ql/src/test/queries/clientpositive/join_merging.q =================================================================== --- ql/src/test/queries/clientpositive/join_merging.q (revision 1637277) +++ ql/src/test/queries/clientpositive/join_merging.q (working copy) @@ -1,17 +1,4 @@ - -CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -); - explain select p1.p_size, p2.p_size from part p1 left outer join part p2 on p1.p_partkey = p2.p_partkey right outer join part p3 on p2.p_partkey = p3.p_partkey and Index: ql/src/test/queries/clientpositive/leadlag.q =================================================================== --- ql/src/test/queries/clientpositive/leadlag.q (revision 1637277) +++ ql/src/test/queries/clientpositive/leadlag.q (working copy) @@ -1,20 +1,3 @@ -DROP TABLE part; - --- data setup -CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -); - -LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part; - --1. 
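The run of hunks above all make the same mechanical change: the per-file DROP/CREATE/LOAD setup for the part table is removed, so these tests now assume part is provisioned once by the shared test setup instead of being rebuilt in every .q file; the queries themselves are untouched. The lead/lag tests that follow use both the one-argument and three-argument forms; for reference, lead(expr, n, default) reads n rows ahead of the current row within the window partition and returns default (NULL if omitted) when the offset runs past the partition edge, and lag is symmetric:

select p_mfgr, p_retailprice,
lead(p_retailprice, 1, 0.0) over (partition by p_mfgr order by p_name) as next_price,
lag(p_retailprice, 1, 0.0) over (partition by p_mfgr order by p_name) as prev_price
from part;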
testLagWithPTFWindowing select p_mfgr, p_name, rank() over (partition by p_mfgr order by p_name) as r, Index: ql/src/test/queries/clientpositive/leadlag_queries.q =================================================================== --- ql/src/test/queries/clientpositive/leadlag_queries.q (revision 1637277) +++ ql/src/test/queries/clientpositive/leadlag_queries.q (working copy) @@ -1,18 +1,3 @@ --- data setup -CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -); - -LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part; - -- 1. testLeadUDAF select p_mfgr, p_retailprice, lead(p_retailprice) over (partition by p_mfgr order by p_name) as l1, Index: ql/src/test/queries/clientpositive/orc_merge1.q =================================================================== --- ql/src/test/queries/clientpositive/orc_merge1.q (revision 1637277) +++ ql/src/test/queries/clientpositive/orc_merge1.q (working copy) @@ -31,7 +31,7 @@ SELECT key, value, PMOD(HASH(key), 2) as part FROM src; -DESC FORMATTED orcfile_merge1 partition (ds='1', part='0'); +dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orcfile_merge1/ds=1/part=0/; set hive.merge.tezfiles=true; set hive.merge.mapfiles=true; @@ -46,7 +46,7 @@ SELECT key, value, PMOD(HASH(key), 2) as part FROM src; -DESC FORMATTED orcfile_merge1b partition (ds='1', part='0'); +dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orcfile_merge1b/ds=1/part=0/; set hive.merge.orcfile.stripe.level=true; -- auto-merge fast way @@ -59,7 +59,7 @@ SELECT key, value, PMOD(HASH(key), 2) as part FROM src; -DESC FORMATTED orcfile_merge1c partition (ds='1', part='0'); +dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orcfile_merge1c/ds=1/part=0/; set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; -- Verify Index: ql/src/test/queries/clientpositive/orc_merge2.q =================================================================== --- ql/src/test/queries/clientpositive/orc_merge2.q (revision 1637277) +++ ql/src/test/queries/clientpositive/orc_merge2.q (working copy) @@ -18,7 +18,7 @@ PMOD(HASH(value), 10) as three FROM src; -DESC FORMATTED orcfile_merge2a partition (one='1', two='0', three='2'); +dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orcfile_merge2a/one=1/two=0/three=2/; SELECT SUM(HASH(c)) FROM ( SELECT TRANSFORM(*) USING 'tr \t _' AS (c) Index: ql/src/test/queries/clientpositive/orc_merge3.q =================================================================== --- ql/src/test/queries/clientpositive/orc_merge3.q (revision 1637277) +++ ql/src/test/queries/clientpositive/orc_merge3.q (working copy) @@ -19,7 +19,7 @@ INSERT OVERWRITE TABLE orcfile_merge3b SELECT key, value FROM orcfile_merge3a; -DESC FORMATTED orcfile_merge3b; +dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orcfile_merge3b/; SELECT SUM(HASH(c)) FROM ( SELECT TRANSFORM(key, value) USING 'tr \t _' AS (c) Index: ql/src/test/queries/clientpositive/orc_merge4.q =================================================================== --- ql/src/test/queries/clientpositive/orc_merge4.q (revision 1637277) +++ ql/src/test/queries/clientpositive/orc_merge4.q (working copy) @@ -11,7 +11,7 @@ INSERT OVERWRITE TABLE orcfile_merge3a PARTITION (ds='1') SELECT * FROM src; -DESC FORMATTED orcfile_merge3a PARTITION (ds='1'); +dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orcfile_merge3a/ds=1/; set hive.merge.mapfiles=true; INSERT OVERWRITE TABLE 
orcfile_merge3a PARTITION (ds='1') @@ -20,8 +20,8 @@ INSERT OVERWRITE TABLE orcfile_merge3a PARTITION (ds='2') SELECT * FROM src; -DESC FORMATTED orcfile_merge3a PARTITION (ds='1'); -DESC FORMATTED orcfile_merge3a PARTITION (ds='2'); +dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orcfile_merge3a/ds=1/; +dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orcfile_merge3a/ds=2/; set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; EXPLAIN INSERT OVERWRITE TABLE orcfile_merge3b Index: ql/src/test/queries/clientpositive/orc_merge5.q =================================================================== --- ql/src/test/queries/clientpositive/orc_merge5.q (revision 1637277) +++ ql/src/test/queries/clientpositive/orc_merge5.q (working copy) @@ -23,7 +23,7 @@ -- 3 files total analyze table orc_merge5b compute statistics noscan; -desc formatted orc_merge5b; +dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_merge5b/; select * from orc_merge5b; set hive.merge.orcfile.stripe.level=true; @@ -37,7 +37,7 @@ -- 1 file after merging analyze table orc_merge5b compute statistics noscan; -desc formatted orc_merge5b; +dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_merge5b/; select * from orc_merge5b; set hive.merge.orcfile.stripe.level=false; @@ -47,7 +47,7 @@ insert overwrite table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13; analyze table orc_merge5b compute statistics noscan; -desc formatted orc_merge5b; +dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_merge5b/; select * from orc_merge5b; set hive.merge.orcfile.stripe.level=true; @@ -56,6 +56,6 @@ -- 1 file after merging analyze table orc_merge5b compute statistics noscan; -desc formatted orc_merge5b; +dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_merge5b/; select * from orc_merge5b; Index: ql/src/test/queries/clientpositive/orc_merge6.q =================================================================== --- ql/src/test/queries/clientpositive/orc_merge6.q (revision 1637277) +++ ql/src/test/queries/clientpositive/orc_merge6.q (working copy) @@ -26,8 +26,8 @@ -- 3 files total analyze table orc_merge5a partition(year="2000",hour=24) compute statistics noscan; analyze table orc_merge5a partition(year="2001",hour=24) compute statistics noscan; -desc formatted orc_merge5a partition(year="2000",hour=24); -desc formatted orc_merge5a partition(year="2001",hour=24); +dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_merge5a/year=2000/hour=24/; +dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_merge5a/year=2001/hour=24/; show partitions orc_merge5a; select * from orc_merge5a; @@ -44,8 +44,8 @@ -- 1 file after merging analyze table orc_merge5a partition(year="2000",hour=24) compute statistics noscan; analyze table orc_merge5a partition(year="2001",hour=24) compute statistics noscan; -desc formatted orc_merge5a partition(year="2000",hour=24); -desc formatted orc_merge5a partition(year="2001",hour=24); +dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_merge5a/year=2000/hour=24/; +dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_merge5a/year=2001/hour=24/; show partitions orc_merge5a; select * from orc_merge5a; @@ -58,8 +58,8 @@ insert overwrite table orc_merge5a partition (year="2001",hour=24) select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13; analyze table orc_merge5a partition(year="2000",hour=24) compute statistics noscan; analyze table orc_merge5a partition(year="2001",hour=24) compute statistics noscan; -desc formatted orc_merge5a 
partition(year="2000",hour=24); -desc formatted orc_merge5a partition(year="2001",hour=24); +dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_merge5a/year=2000/hour=24/; +dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_merge5a/year=2001/hour=24/; show partitions orc_merge5a; select * from orc_merge5a; @@ -71,8 +71,8 @@ -- 1 file after merging analyze table orc_merge5a partition(year="2000",hour=24) compute statistics noscan; analyze table orc_merge5a partition(year="2001",hour=24) compute statistics noscan; -desc formatted orc_merge5a partition(year="2000",hour=24); -desc formatted orc_merge5a partition(year="2001",hour=24); +dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_merge5a/year=2000/hour=24/; +dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_merge5a/year=2001/hour=24/; show partitions orc_merge5a; select * from orc_merge5a; Index: ql/src/test/queries/clientpositive/orc_merge7.q =================================================================== --- ql/src/test/queries/clientpositive/orc_merge7.q (revision 1637277) +++ ql/src/test/queries/clientpositive/orc_merge7.q (working copy) @@ -30,8 +30,8 @@ -- 3 files total analyze table orc_merge5a partition(st=80.0) compute statistics noscan; analyze table orc_merge5a partition(st=0.8) compute statistics noscan; -desc formatted orc_merge5a partition(st=80.0); -desc formatted orc_merge5a partition(st=0.8); +dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_merge5a/st=80.0/; +dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_merge5a/st=0.8/; show partitions orc_merge5a; select * from orc_merge5a where userid<=13; @@ -48,8 +48,8 @@ -- 1 file after merging analyze table orc_merge5a partition(st=80.0) compute statistics noscan; analyze table orc_merge5a partition(st=0.8) compute statistics noscan; -desc formatted orc_merge5a partition(st=80.0); -desc formatted orc_merge5a partition(st=0.8); +dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_merge5a/st=80.0/; +dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_merge5a/st=0.8/; show partitions orc_merge5a; select * from orc_merge5a where userid<=13; @@ -62,8 +62,8 @@ insert overwrite table orc_merge5a partition (st) select userid,string1,subtype,decimal1,ts,subtype from orc_merge5; analyze table orc_merge5a partition(st=80.0) compute statistics noscan; analyze table orc_merge5a partition(st=0.8) compute statistics noscan; -desc formatted orc_merge5a partition(st=80.0); -desc formatted orc_merge5a partition(st=0.8); +dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_merge5a/st=80.0/; +dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_merge5a/st=0.8/; show partitions orc_merge5a; select * from orc_merge5a where userid<=13; @@ -75,8 +75,8 @@ -- 1 file after merging analyze table orc_merge5a partition(st=80.0) compute statistics noscan; analyze table orc_merge5a partition(st=0.8) compute statistics noscan; -desc formatted orc_merge5a partition(st=80.0); -desc formatted orc_merge5a partition(st=0.8); +dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_merge5a/st=80.0/; +dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_merge5a/st=0.8/; show partitions orc_merge5a; select * from orc_merge5a where userid<=13; Index: ql/src/test/queries/clientpositive/orc_merge_incompat1.q =================================================================== --- ql/src/test/queries/clientpositive/orc_merge_incompat1.q (revision 1637277) +++ ql/src/test/queries/clientpositive/orc_merge_incompat1.q (working copy) @@ -22,7 +22,7 @@ -- 5 files total analyze table orc_merge5b compute 
statistics noscan; -desc formatted orc_merge5b; +dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_merge5b/; select * from orc_merge5b; set hive.merge.orcfile.stripe.level=true; @@ -30,6 +30,6 @@ -- 3 file after merging - all 0.12 format files will be merged and 0.11 files will be left behind analyze table orc_merge5b compute statistics noscan; -desc formatted orc_merge5b; +dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_merge5b/; select * from orc_merge5b; Index: ql/src/test/queries/clientpositive/orc_merge_incompat2.q =================================================================== --- ql/src/test/queries/clientpositive/orc_merge_incompat2.q (revision 1637277) +++ ql/src/test/queries/clientpositive/orc_merge_incompat2.q (working copy) @@ -32,8 +32,8 @@ analyze table orc_merge5a partition(st=80.0) compute statistics noscan; analyze table orc_merge5a partition(st=0.8) compute statistics noscan; -desc formatted orc_merge5a partition(st=80.0); -desc formatted orc_merge5a partition(st=0.8); +dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_merge5a/st=80.0/; +dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_merge5a/st=0.8/; show partitions orc_merge5a; select * from orc_merge5a where userid<=13; @@ -44,8 +44,8 @@ analyze table orc_merge5a partition(st=80.0) compute statistics noscan; analyze table orc_merge5a partition(st=0.8) compute statistics noscan; -desc formatted orc_merge5a partition(st=80.0); -desc formatted orc_merge5a partition(st=0.8); +dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_merge5a/st=80.0/; +dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_merge5a/st=0.8/; show partitions orc_merge5a; select * from orc_merge5a where userid<=13; Index: ql/src/test/queries/clientpositive/order_within_subquery.q =================================================================== --- ql/src/test/queries/clientpositive/order_within_subquery.q (revision 1637277) +++ ql/src/test/queries/clientpositive/order_within_subquery.q (working copy) @@ -1,18 +1,3 @@ -CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -); - -LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part; - - select t1.p_name, t2.p_name from (select * from part order by p_size limit 10) t1 join part t2 on t1.p_partkey = t2.p_partkey and t1.p_size = t2.p_size where t1.p_partkey < 100000; Index: ql/src/test/queries/clientpositive/ptf.q =================================================================== --- ql/src/test/queries/clientpositive/ptf.q (revision 1637277) +++ ql/src/test/queries/clientpositive/ptf.q (working copy) @@ -1,20 +1,3 @@ -DROP TABLE part; - --- data setup -CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -); - -LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part; - -- SORT_QUERY_RESULTS --1. 
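A pattern running through all the orc_merge*.q hunks above: every DESC FORMATTED of a table or partition is swapped for a dfs -ls of its warehouse directory. These tests assert a file count before and after stripe-level merging (the "3 files total" and "1 file after merging" comments), and a raw directory listing exposes that count directly, presumably with more stable output across environments than DESC FORMATTED. The replacement shape, verbatim from the hunks above:

dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_merge5b/;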
test1 Index: ql/src/test/queries/clientpositive/ptf_decimal.q =================================================================== --- ql/src/test/queries/clientpositive/ptf_decimal.q (revision 1637277) +++ ql/src/test/queries/clientpositive/ptf_decimal.q (working copy) @@ -1,20 +1,4 @@ -DROP TABLE IF EXISTS part; --- data setup -CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DECIMAL(6,2), - p_comment STRING -); - -LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part; - -- 1. aggregate functions with decimal type select p_mfgr, p_retailprice, Index: ql/src/test/queries/clientpositive/ptf_general_queries.q =================================================================== --- ql/src/test/queries/clientpositive/ptf_general_queries.q (revision 1637277) +++ ql/src/test/queries/clientpositive/ptf_general_queries.q (working copy) @@ -1,20 +1,3 @@ -DROP TABLE part; - --- data setup -CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -); - -LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part; - -- 1. testNoPTFNoWindowing select p_mfgr, p_name, p_size from part Index: ql/src/test/queries/clientpositive/ptf_streaming.q =================================================================== --- ql/src/test/queries/clientpositive/ptf_streaming.q (revision 1637277) +++ ql/src/test/queries/clientpositive/ptf_streaming.q (working copy) @@ -1,20 +1,3 @@ -DROP TABLE part; - --- data setup -CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -); - -LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part; - create temporary function noopstreaming as 'org.apache.hadoop.hive.ql.udf.ptf.NoopStreaming$NoopStreamingResolver'; --1. 
test1 Index: ql/src/test/queries/clientpositive/ql_rewrite_gbtoidx.q =================================================================== --- ql/src/test/queries/clientpositive/ql_rewrite_gbtoidx.q (revision 1637277) +++ ql/src/test/queries/clientpositive/ql_rewrite_gbtoidx.q (working copy) @@ -1,8 +1,8 @@ set hive.stats.dbclass=fs; set hive.stats.autogather=true; -DROP TABLE lineitem; -CREATE TABLE lineitem (L_ORDERKEY INT, +DROP TABLE IF EXISTS lineitem_ix; +CREATE TABLE lineitem_ix (L_ORDERKEY INT, L_PARTKEY INT, L_SUPPKEY INT, L_LINENUMBER INT, @@ -21,28 +21,28 @@ ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'; -LOAD DATA LOCAL INPATH '../../data/files/lineitem.txt' OVERWRITE INTO TABLE lineitem; +LOAD DATA LOCAL INPATH '../../data/files/lineitem.txt' OVERWRITE INTO TABLE lineitem_ix; -CREATE INDEX lineitem_lshipdate_idx ON TABLE lineitem(l_shipdate) AS 'org.apache.hadoop.hive.ql.index.AggregateIndexHandler' WITH DEFERRED REBUILD IDXPROPERTIES("AGGREGATES"="count(l_shipdate)"); -ALTER INDEX lineitem_lshipdate_idx ON lineitem REBUILD; +CREATE INDEX lineitem_ix_lshipdate_idx ON TABLE lineitem_ix(l_shipdate) AS 'org.apache.hadoop.hive.ql.index.AggregateIndexHandler' WITH DEFERRED REBUILD IDXPROPERTIES("AGGREGATES"="count(l_shipdate)"); +ALTER INDEX lineitem_ix_lshipdate_idx ON lineitem_ix REBUILD; explain select l_shipdate, count(l_shipdate) -from lineitem +from lineitem_ix group by l_shipdate; select l_shipdate, count(l_shipdate) -from lineitem +from lineitem_ix group by l_shipdate order by l_shipdate; set hive.optimize.index.groupby=true; explain select l_shipdate, count(l_shipdate) -from lineitem +from lineitem_ix group by l_shipdate; select l_shipdate, count(l_shipdate) -from lineitem +from lineitem_ix group by l_shipdate order by l_shipdate; @@ -52,14 +52,14 @@ explain select year(l_shipdate) as year, month(l_shipdate) as month, count(l_shipdate) as monthly_shipments -from lineitem +from lineitem_ix group by year(l_shipdate), month(l_shipdate) order by year, month; select year(l_shipdate) as year, month(l_shipdate) as month, count(l_shipdate) as monthly_shipments -from lineitem +from lineitem_ix group by year(l_shipdate), month(l_shipdate) order by year, month; @@ -68,14 +68,14 @@ explain select year(l_shipdate) as year, month(l_shipdate) as month, count(l_shipdate) as monthly_shipments -from lineitem +from lineitem_ix group by year(l_shipdate), month(l_shipdate) order by year, month; select year(l_shipdate) as year, month(l_shipdate) as month, count(l_shipdate) as monthly_shipments -from lineitem +from lineitem_ix group by year(l_shipdate), month(l_shipdate) order by year, month; @@ -86,24 +86,24 @@ from (select year(l_shipdate) as year, month(l_shipdate) as month, count(l_shipdate) as monthly_shipments - from lineitem + from lineitem_ix where year(l_shipdate) = 1997 group by year(l_shipdate), month(l_shipdate) ) lastyear join (select year(l_shipdate) as year, month(l_shipdate) as month, count(l_shipdate) as monthly_shipments - from lineitem + from lineitem_ix where year(l_shipdate) = 1998 group by year(l_shipdate), month(l_shipdate) ) thisyear on lastyear.month = thisyear.month; explain select l_shipdate, cnt -from (select l_shipdate, count(l_shipdate) as cnt from lineitem group by l_shipdate +from (select l_shipdate, count(l_shipdate) as cnt from lineitem_ix group by l_shipdate union all select l_shipdate, l_orderkey as cnt -from lineitem) dummy; +from lineitem_ix) dummy; CREATE TABLE tbl(key int, value int); CREATE INDEX tbl_key_idx ON TABLE tbl(key) AS 
'org.apache.hadoop.hive.ql.index.AggregateIndexHandler' WITH DEFERRED REBUILD IDXPROPERTIES("AGGREGATES"="count(key)"); Index: ql/src/test/queries/clientpositive/reducesink_dedup.q =================================================================== --- ql/src/test/queries/clientpositive/reducesink_dedup.q (revision 1637277) +++ ql/src/test/queries/clientpositive/reducesink_dedup.q (working copy) @@ -1,19 +1,3 @@ -DROP TABLE part; - --- data setup -CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -); - - select p_name from (select p_name from part distribute by 1 sort by 1) p distribute by 1 sort by 1 Index: ql/src/test/queries/clientpositive/skewjoin_mapjoin1.q =================================================================== --- ql/src/test/queries/clientpositive/skewjoin_mapjoin1.q (revision 0) +++ ql/src/test/queries/clientpositive/skewjoin_mapjoin1.q (working copy) @@ -0,0 +1,44 @@ +set hive.mapred.supports.subdirectories=true; +set hive.optimize.skewjoin.compiletime = true; +set hive.auto.convert.join=true; + +CREATE TABLE T1(key STRING, val STRING) +SKEWED BY (key) ON ((2)) STORED AS TEXTFILE; + +LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1; + +CREATE TABLE T2(key STRING, val STRING) +SKEWED BY (key) ON ((3)) STORED AS TEXTFILE; + +LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2; + +-- copy from skewjoinopt1 +-- test compile time skew join and auto map join +-- a simple join query with skew on both the tables on the join key +-- adding an order by at the end to make the results deterministic + +EXPLAIN +SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key; + +SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key +ORDER BY a.key, b.key, a.val, b.val; + +-- test outer joins also + +EXPLAIN +SELECT a.*, b.* FROM T1 a RIGHT OUTER JOIN T2 b ON a.key = b.key; + +SELECT a.*, b.* FROM T1 a RIGHT OUTER JOIN T2 b ON a.key = b.key +ORDER BY a.key, b.key, a.val, b.val; + +-- an aggregation at the end should not change anything + +EXPLAIN +SELECT count(1) FROM T1 a JOIN T2 b ON a.key = b.key; + +SELECT count(1) FROM T1 a JOIN T2 b ON a.key = b.key; + +EXPLAIN +SELECT count(1) FROM T1 a RIGHT OUTER JOIN T2 b ON a.key = b.key; + +SELECT count(1) FROM T1 a RIGHT OUTER JOIN T2 b ON a.key = b.key; Index: ql/src/test/queries/clientpositive/skewjoin_mapjoin10.q =================================================================== --- ql/src/test/queries/clientpositive/skewjoin_mapjoin10.q (revision 0) +++ ql/src/test/queries/clientpositive/skewjoin_mapjoin10.q (working copy) @@ -0,0 +1,53 @@ +set hive.mapred.supports.subdirectories=true; +set hive.optimize.skewjoin.compiletime = true; +set hive.auto.convert.join=true; + +CREATE TABLE tmpT1(key STRING, val STRING) STORED AS TEXTFILE; + +LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE tmpT1; + +-- testing skew on other data types - int +CREATE TABLE T1(key INT, val STRING) SKEWED BY (key) ON ((2)); +INSERT OVERWRITE TABLE T1 SELECT key, val FROM tmpT1; + +CREATE TABLE tmpT2(key STRING, val STRING) STORED AS TEXTFILE; + +LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE tmpT2; + +CREATE TABLE T2(key INT, val STRING) SKEWED BY (key) ON ((3)); + +INSERT OVERWRITE TABLE T2 SELECT key, val FROM tmpT2; + +-- copy from skewjoinopt15 +-- test compile time skew join and auto map join +-- The skewed key is an integer column.
+-- Otherwise this test is similar to skewjoinopt1.q +-- Both the joined tables are skewed, and the joined column +-- is an integer +-- adding an order by at the end to make the results deterministic + +EXPLAIN +SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key; + +SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key +ORDER BY a.key, b.key, a.val, b.val; + +-- test outer joins also + +EXPLAIN +SELECT a.*, b.* FROM T1 a RIGHT OUTER JOIN T2 b ON a.key = b.key; + +SELECT a.*, b.* FROM T1 a RIGHT OUTER JOIN T2 b ON a.key = b.key +ORDER BY a.key, b.key, a.val, b.val; + +-- an aggregation at the end should not change anything + +EXPLAIN +SELECT count(1) FROM T1 a JOIN T2 b ON a.key = b.key; + +SELECT count(1) FROM T1 a JOIN T2 b ON a.key = b.key; + +EXPLAIN +SELECT count(1) FROM T1 a RIGHT OUTER JOIN T2 b ON a.key = b.key; + +SELECT count(1) FROM T1 a RIGHT OUTER JOIN T2 b ON a.key = b.key; Index: ql/src/test/queries/clientpositive/skewjoin_mapjoin11.q =================================================================== --- ql/src/test/queries/clientpositive/skewjoin_mapjoin11.q (revision 0) +++ ql/src/test/queries/clientpositive/skewjoin_mapjoin11.q (working copy) @@ -0,0 +1,26 @@ +set hive.mapred.supports.subdirectories=true; +set hive.optimize.skewjoin.compiletime = true; +set hive.auto.convert.join=true; + +CREATE TABLE T1(key STRING, val STRING) +CLUSTERED BY (key) INTO 4 BUCKETS +SKEWED BY (key) ON ((2)) STORED AS TEXTFILE; + +LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1; + +CREATE TABLE T2(key STRING, val STRING) STORED AS TEXTFILE; + +LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2; + +-- copy from skewjoinopt19 +-- test compile time skew join and auto map join +-- add a test where the skewed key is also the bucketized key +-- it should not matter, and the compile time skewed join +-- optimization is performed +-- adding an order by at the end to make the results deterministic + +EXPLAIN +SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key; + +SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key +ORDER BY a.key, b.key, a.val, b.val; Index: ql/src/test/queries/clientpositive/skewjoin_mapjoin2.q =================================================================== --- ql/src/test/queries/clientpositive/skewjoin_mapjoin2.q (revision 0) +++ ql/src/test/queries/clientpositive/skewjoin_mapjoin2.q (working copy) @@ -0,0 +1,34 @@ +set hive.mapred.supports.subdirectories=true; +set hive.optimize.skewjoin.compiletime = true; +set hive.auto.convert.join=true; + +CREATE TABLE T1(key STRING, val STRING) +SKEWED BY (key) ON ((2), (8)) STORED AS TEXTFILE; + +LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1; + +CREATE TABLE T2(key STRING, val STRING) +SKEWED BY (key) ON ((3), (8)) STORED AS TEXTFILE; + +LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2; + +-- copy from skewjoinopt3 +-- test compile time skew join and auto map join +-- a simple query with skew on both the tables. One of the skewed +-- values is common to both the tables. The skewed value should not be +-- repeated in the filter. +-- adding an order by at the end to make the results deterministic
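(Illustrative sketch, not part of the patch: the compile-time skew-join tests above all hinge on the SKEWED BY metadata declared in the DDL. Assuming a session where tables like the T1 and T2 above exist, that metadata can be confirmed directly; DESCRIBE FORMATTED reports it under "Skewed Columns" and "Skewed Values".)

-- Hypothetical verification step, not part of the test file:
-- surfaces the skew metadata that hive.optimize.skewjoin.compiletime
-- consults when it splits the join plan at compile time.
DESCRIBE FORMATTED T1;
DESCRIBE FORMATTED T2;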
+ +EXPLAIN +SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key; + +SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key +ORDER BY a.key, b.key, a.val, b.val; + +-- test outer joins also + +EXPLAIN +SELECT a.*, b.* FROM T1 a FULL OUTER JOIN T2 b ON a.key = b.key; + +SELECT a.*, b.* FROM T1 a FULL OUTER JOIN T2 b ON a.key = b.key +ORDER BY a.key, b.key, a.val, b.val; Index: ql/src/test/queries/clientpositive/skewjoin_mapjoin3.q =================================================================== --- ql/src/test/queries/clientpositive/skewjoin_mapjoin3.q (revision 0) +++ ql/src/test/queries/clientpositive/skewjoin_mapjoin3.q (working copy) @@ -0,0 +1,26 @@ +set hive.mapred.supports.subdirectories=true; +set hive.optimize.skewjoin.compiletime = true; +set hive.auto.convert.join=true; + +CREATE TABLE T1(key STRING, val STRING) +SKEWED BY (key, val) ON ((2, 12), (8, 18)) STORED AS TEXTFILE; + +LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1; + +CREATE TABLE T2(key STRING, val STRING) +SKEWED BY (key, val) ON ((3, 13), (8, 18)) STORED AS TEXTFILE; + +LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2; + +-- copy from skewjoinopt6 +-- test compile time skew join and auto map join +-- Both the join tables are skewed by 2 keys, and one of the skewed values +-- is common to both the tables. The join key is a subset of the skewed key set: +-- it only contains the first skewed key for both the tables +-- adding an order by at the end to make the results deterministic + +EXPLAIN +SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key; + +SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key +ORDER BY a.key, b.key, a.val, b.val; Index: ql/src/test/queries/clientpositive/skewjoin_mapjoin4.q =================================================================== --- ql/src/test/queries/clientpositive/skewjoin_mapjoin4.q (revision 0) +++ ql/src/test/queries/clientpositive/skewjoin_mapjoin4.q (working copy) @@ -0,0 +1,30 @@ +set hive.mapred.supports.subdirectories=true; +set hive.optimize.skewjoin.compiletime = true; +set hive.auto.convert.join=true; + +CREATE TABLE T1(key STRING, val STRING) +SKEWED BY (key) ON ((2), (8)) STORED AS TEXTFILE; + +LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1; + +CREATE TABLE T2(key STRING, val STRING) +SKEWED BY (key) ON ((3), (8)) STORED AS TEXTFILE; + +LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2; + +CREATE TABLE T3(key STRING, val STRING) STORED AS TEXTFILE; + +LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3; + +-- copy from skewjoinopt7 +-- test compile time skew join and auto map join +-- This test is for validating skewed join compile time optimization for more than +-- 2 tables. The join key is the same, and so a 3-way join would be performed.
+-- 2 of the 3 tables are skewed on the join key +-- adding an order by at the end to make the results deterministic + +EXPLAIN +SELECT a.*, b.*, c.* FROM T1 a JOIN T2 b ON a.key = b.key JOIN T3 c on a.key = c.key; + +SELECT a.*, b.*, c.* FROM T1 a JOIN T2 b ON a.key = b.key JOIN T3 c on a.key = c.key +ORDER BY a.key, b.key, c.key, a.val, b.val, c.val; Index: ql/src/test/queries/clientpositive/skewjoin_mapjoin5.q =================================================================== --- ql/src/test/queries/clientpositive/skewjoin_mapjoin5.q (revision 0) +++ ql/src/test/queries/clientpositive/skewjoin_mapjoin5.q (working copy) @@ -0,0 +1,51 @@ +set hive.mapred.supports.subdirectories=true; +set hive.optimize.skewjoin.compiletime = true; +set hive.auto.convert.join=true; + +CREATE TABLE T1(key STRING, val STRING) +SKEWED BY (key) ON ((2)) STORED AS TEXTFILE; + +LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1; + +CREATE TABLE T2(key STRING, val STRING) STORED AS TEXTFILE; + +LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2; + +-- copy from skewjoinopt9 +-- test compile time skew join and auto map join +-- no skew join compile time optimization would be performed if one of the +-- join sources is a sub-query consisting of a union all +-- adding an order by at the end to make the results deterministic +EXPLAIN +select * from +( +select key, val from T1 + union all +select key, val from T1 +) subq1 +join T2 b on subq1.key = b.key; + +select * from +( +select key, val from T1 + union all +select key, val from T1 +) subq1 +join T2 b on subq1.key = b.key +ORDER BY subq1.key, b.key, subq1.val, b.val; + +-- no skew join compile time optimization would be performed if one of the +-- join sources is a sub-query consisting of a group by +EXPLAIN +select * from +( +select key, count(1) as cnt from T1 group by key +) subq1 +join T2 b on subq1.key = b.key; + +select * from +( +select key, count(1) as cnt from T1 group by key +) subq1 +join T2 b on subq1.key = b.key +ORDER BY subq1.key, b.key, subq1.cnt, b.val; Index: ql/src/test/queries/clientpositive/skewjoin_mapjoin6.q =================================================================== --- ql/src/test/queries/clientpositive/skewjoin_mapjoin6.q (revision 0) +++ ql/src/test/queries/clientpositive/skewjoin_mapjoin6.q (working copy) @@ -0,0 +1,22 @@ +set hive.mapred.supports.subdirectories=true; +set hive.optimize.skewjoin.compiletime = true; +set hive.auto.convert.join=true; + +CREATE TABLE T1(key STRING, value STRING) STORED AS TEXTFILE; + +LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1; + +drop table array_valued_T1; +create table array_valued_T1 (key string, value array<string>) SKEWED BY (key) ON ((8)); +insert overwrite table array_valued_T1 select key, array(value) from T1; + +-- copy from skewjoinopt10 +-- test compile time skew join and auto map join +-- This test is to verify the skew join compile optimization when the join is followed by a lateral view +-- adding an order by at the end to make the results deterministic + +explain +select * from (select a.key as key, b.value as array_val from T1 a join array_valued_T1 b on a.key=b.key) i lateral view explode (array_val) c as val; + +select * from (select a.key as key, b.value as array_val from T1 a join array_valued_T1 b on a.key=b.key) i lateral view explode (array_val) c as val +ORDER BY key, val; Index: ql/src/test/queries/clientpositive/skewjoin_mapjoin7.q =================================================================== ---
ql/src/test/queries/clientpositive/skewjoin_mapjoin7.q (revision 0) +++ ql/src/test/queries/clientpositive/skewjoin_mapjoin7.q (working copy) @@ -0,0 +1,35 @@ +set hive.mapred.supports.subdirectories=true; +set hive.optimize.skewjoin.compiletime = true; +set hive.auto.convert.join=true; + +CREATE TABLE T1(key STRING, val STRING) +SKEWED BY (key) ON ((2)) STORED AS TEXTFILE; + +LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1; + +CREATE TABLE T2(key STRING, val STRING) STORED AS TEXTFILE; + +LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2; + +-- copy from skewjoinopt11 +-- test compile time skew join and auto map join +-- This test is to verify the skew join compile optimization when the join is followed +-- by a union. Both sides of a union consist of a join, which should have used +-- skew join compile time optimization. +-- adding an order by at the end to make the results deterministic + +EXPLAIN +select * from +( + select a.key, a.val as val1, b.val as val2 from T1 a join T2 b on a.key = b.key + union all + select a.key, a.val as val1, b.val as val2 from T1 a join T2 b on a.key = b.key +) subq1; + +select * from +( + select a.key, a.val as val1, b.val as val2 from T1 a join T2 b on a.key = b.key + union all + select a.key, a.val as val1, b.val as val2 from T1 a join T2 b on a.key = b.key +) subq1 +ORDER BY key, val1, val2; Index: ql/src/test/queries/clientpositive/skewjoin_mapjoin8.q =================================================================== --- ql/src/test/queries/clientpositive/skewjoin_mapjoin8.q (revision 0) +++ ql/src/test/queries/clientpositive/skewjoin_mapjoin8.q (working copy) @@ -0,0 +1,38 @@ +set hive.mapred.supports.subdirectories=true; +set hive.optimize.skewjoin.compiletime = true; +set hive.auto.convert.join=true; + +CREATE TABLE T1(key STRING, val STRING) STORED AS TEXTFILE; + +LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1; + +CREATE TABLE T2(key STRING, val STRING) STORED AS TEXTFILE; + +LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2; + +CREATE TABLE T3(key STRING, val STRING) +SKEWED BY (val) ON ((12)) STORED AS TEXTFILE; + +LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3; + +-- copy from skewjoinopt13 +-- test compile time skew join and auto map join +-- This test is for skewed join compile time optimization for more than 2 tables. +-- The join key for table 3 is different from the join key used for joining +-- tables 1 and 2. 
Table 3 is skewed, but since one of the join sources for table +-- 3 consists of a sub-query which contains a join, the compile time skew join +-- optimization is not performed +-- adding an order by at the end to make the results deterministic + +EXPLAIN +select * +from +T1 a join T2 b on a.key = b.key +join T3 c on a.val = c.val; + +select * +from +T1 a join T2 b on a.key = b.key +join T3 c on a.val = c.val +order by a.key, b.key, c.key, a.val, b.val, c.val; + Index: ql/src/test/queries/clientpositive/skewjoin_mapjoin9.q =================================================================== --- ql/src/test/queries/clientpositive/skewjoin_mapjoin9.q (revision 0) +++ ql/src/test/queries/clientpositive/skewjoin_mapjoin9.q (working copy) @@ -0,0 +1,40 @@ +set hive.mapred.supports.subdirectories=true; +set hive.optimize.skewjoin.compiletime = true; +set hive.auto.convert.join=true; + +CREATE TABLE T1(key STRING, val STRING) +SKEWED BY (key) ON ((2)) STORED AS TEXTFILE; + +LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1; + +CREATE TABLE T2(key STRING, val STRING) STORED AS TEXTFILE; + +LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2; + +CREATE TABLE T3(key STRING, val STRING) +SKEWED BY (val) ON ((12)) STORED AS TEXTFILE; + +LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3; + +-- copy from skewjoinopt14 +-- test compile time skew join and auto map join +-- This test is for skewed join compile time optimization for more than 2 tables. +-- The join key for table 3 is different from the join key used for joining +-- tables 1 and 2. Tables 1 and 3 are skewed. Since one of the join sources for table +-- 3 consists of a sub-query which contains a join, the compile time skew join +-- optimization is not enabled for table 3, but it is used for the first join between +-- tables 1 and 2 +-- adding an order by at the end to make the results deterministic + +EXPLAIN +select * +from +T1 a join T2 b on a.key = b.key +join T3 c on a.val = c.val; + +select * +from +T1 a join T2 b on a.key = b.key +join T3 c on a.val = c.val +order by a.key, b.key, a.val, b.val; + Index: ql/src/test/queries/clientpositive/stats_noscan_2.q =================================================================== --- ql/src/test/queries/clientpositive/stats_noscan_2.q (revision 1637277) +++ ql/src/test/queries/clientpositive/stats_noscan_2.q (working copy) @@ -1,7 +1,8 @@ +dfs -cp ${system:hive.root}/data/files/ext_test ${system:test.tmp.dir}/analyze_external; -- test analyze table compute statistics [noscan] on external table -- 1 test table -CREATE EXTERNAL TABLE anaylyze_external (a INT) LOCATION '${system:hive.root}/data/files/ext_test'; +CREATE EXTERNAL TABLE anaylyze_external (a INT) LOCATION '${system:test.tmp.dir}/analyze_external'; SELECT * FROM anaylyze_external; analyze table anaylyze_external compute statistics noscan; describe formatted anaylyze_external; Index: ql/src/test/queries/clientpositive/subquery_in.q =================================================================== --- ql/src/test/queries/clientpositive/subquery_in.q (revision 1637277) +++ ql/src/test/queries/clientpositive/subquery_in.q (working copy) @@ -1,44 +1,5 @@ -- SORT_QUERY_RESULTS -DROP TABLE part; - --- data setup -CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -); - -LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part; - -DROP TABLE lineitem;
-CREATE TABLE lineitem (L_ORDERKEY INT, - L_PARTKEY INT, - L_SUPPKEY INT, - L_LINENUMBER INT, - L_QUANTITY DOUBLE, - L_EXTENDEDPRICE DOUBLE, - L_DISCOUNT DOUBLE, - L_TAX DOUBLE, - L_RETURNFLAG STRING, - L_LINESTATUS STRING, - l_shipdate STRING, - L_COMMITDATE STRING, - L_RECEIPTDATE STRING, - L_SHIPINSTRUCT STRING, - L_SHIPMODE STRING, - L_COMMENT STRING) -ROW FORMAT DELIMITED -FIELDS TERMINATED BY '|'; - -LOAD DATA LOCAL INPATH '../../data/files/lineitem.txt' OVERWRITE INTO TABLE lineitem; - -- non agg, non corr explain select * Index: ql/src/test/queries/clientpositive/subquery_in_explain_rewrite.q =================================================================== --- ql/src/test/queries/clientpositive/subquery_in_explain_rewrite.q (revision 1637277) +++ ql/src/test/queries/clientpositive/subquery_in_explain_rewrite.q (working copy) @@ -1,39 +1,3 @@ -DROP TABLE part; - --- data setup -CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -); - -DROP TABLE lineitem; -CREATE TABLE lineitem (L_ORDERKEY INT, - L_PARTKEY INT, - L_SUPPKEY INT, - L_LINENUMBER INT, - L_QUANTITY DOUBLE, - L_EXTENDEDPRICE DOUBLE, - L_DISCOUNT DOUBLE, - L_TAX DOUBLE, - L_RETURNFLAG STRING, - L_LINESTATUS STRING, - l_shipdate STRING, - L_COMMITDATE STRING, - L_RECEIPTDATE STRING, - L_SHIPINSTRUCT STRING, - L_SHIPMODE STRING, - L_COMMENT STRING) -ROW FORMAT DELIMITED -FIELDS TERMINATED BY '|'; - - -- non agg, non corr explain rewrite select * Index: ql/src/test/queries/clientpositive/subquery_in_having.q =================================================================== --- ql/src/test/queries/clientpositive/subquery_in_having.q (revision 1637277) +++ ql/src/test/queries/clientpositive/subquery_in_having.q (working copy) @@ -1,7 +1,9 @@ -- SORT_QUERY_RESULTS -- data setup -CREATE TABLE part( +DROP TABLE IF EXISTS part_subq; + +CREATE TABLE part_subq( p_partkey INT, p_name STRING, p_mfgr STRING, @@ -13,7 +15,7 @@ p_comment STRING ); -LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part; +LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part_subq; -- non agg, non corr explain @@ -45,11 +47,11 @@ -- agg, non corr explain select p_mfgr, avg(p_size) -from part b +from part_subq b group by b.p_mfgr having b.p_mfgr in (select p_mfgr - from part + from part_subq group by p_mfgr having max(p_size) - min(p_size) < 20 ) @@ -60,11 +62,11 @@ -- agg, non corr explain select p_mfgr, avg(p_size) -from part b +from part_subq b group by b.p_mfgr having b.p_mfgr in (select p_mfgr - from part + from part_subq group by p_mfgr having max(p_size) - min(p_size) < 20 ) @@ -113,8 +115,10 @@ -- non agg, non corr, windowing explain select p_mfgr, p_name, avg(p_size) -from part +from part_subq group by p_mfgr, p_name having p_name in - (select first_value(p_name) over(partition by p_mfgr order by p_size) from part) + (select first_value(p_name) over(partition by p_mfgr order by p_size) from part_subq) ; + +DROP TABLE part_subq; \ No newline at end of file Index: ql/src/test/queries/clientpositive/subquery_notin.q =================================================================== --- ql/src/test/queries/clientpositive/subquery_notin.q (revision 1637277) +++ ql/src/test/queries/clientpositive/subquery_notin.q (working copy) @@ -1,42 +1,3 @@ -DROP TABLE part; - --- data setup -CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand 
STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -); - -LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part; - -DROP TABLE lineitem; -CREATE TABLE lineitem (L_ORDERKEY INT, - L_PARTKEY INT, - L_SUPPKEY INT, - L_LINENUMBER INT, - L_QUANTITY DOUBLE, - L_EXTENDEDPRICE DOUBLE, - L_DISCOUNT DOUBLE, - L_TAX DOUBLE, - L_RETURNFLAG STRING, - L_LINESTATUS STRING, - l_shipdate STRING, - L_COMMITDATE STRING, - L_RECEIPTDATE STRING, - L_SHIPINSTRUCT STRING, - L_SHIPMODE STRING, - L_COMMENT STRING) -ROW FORMAT DELIMITED -FIELDS TERMINATED BY '|'; - -LOAD DATA LOCAL INPATH '../../data/files/lineitem.txt' OVERWRITE INTO TABLE lineitem; - -- non agg, non corr explain select * Index: ql/src/test/queries/clientpositive/subquery_notin_having.q =================================================================== --- ql/src/test/queries/clientpositive/subquery_notin_having.q (revision 1637277) +++ ql/src/test/queries/clientpositive/subquery_notin_having.q (working copy) @@ -1,21 +1,3 @@ -DROP TABLE part; - --- data setup -CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -); - -LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part; - - -- non agg, non corr explain select key, count(*) Index: ql/src/test/queries/clientpositive/subquery_unqualcolumnrefs.q =================================================================== --- ql/src/test/queries/clientpositive/subquery_unqualcolumnrefs.q (revision 1637277) +++ ql/src/test/queries/clientpositive/subquery_unqualcolumnrefs.q (working copy) @@ -1,18 +1,3 @@ -DROP TABLE part; - --- data setup -CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -); - create table src11 (key1 string, value1 string); create table part2( Index: ql/src/test/queries/clientpositive/temp_table_windowing_expressions.q =================================================================== --- ql/src/test/queries/clientpositive/temp_table_windowing_expressions.q (revision 1637277) +++ ql/src/test/queries/clientpositive/temp_table_windowing_expressions.q (working copy) @@ -1,20 +1,3 @@ -DROP TABLE part; - --- data setup -CREATE TEMPORARY TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -); - -LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part; - drop table over10k; create temporary table over10k( Index: ql/src/test/queries/clientpositive/vector_aggregate_9.q =================================================================== --- ql/src/test/queries/clientpositive/vector_aggregate_9.q (revision 0) +++ ql/src/test/queries/clientpositive/vector_aggregate_9.q (working copy) @@ -0,0 +1,43 @@ +SET hive.vectorized.execution.enabled=true; + +create table vectortab2k( + t tinyint, + si smallint, + i int, + b bigint, + f float, + d double, + dc decimal(38,18), + bo boolean, + s string, + s2 string, + ts timestamp, + ts2 timestamp, + dt date) +ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' +STORED AS TEXTFILE; + +LOAD DATA LOCAL INPATH '../../data/files/vectortab2k' OVERWRITE INTO TABLE vectortab2k; + +create table vectortab2korc( + t tinyint, + si smallint, + i int, + b bigint, + f 
float, + d double, + dc decimal(38,18), + bo boolean, + s string, + s2 string, + ts timestamp, + ts2 timestamp, + dt date) +STORED AS ORC; + +INSERT INTO TABLE vectortab2korc SELECT * FROM vectortab2k; + +explain +select min(dc), max(dc), sum(dc), avg(dc) from vectortab2korc; + +select min(dc), max(dc), sum(dc), avg(dc) from vectortab2korc; \ No newline at end of file Index: ql/src/test/queries/clientpositive/vector_decimal_1.q =================================================================== --- ql/src/test/queries/clientpositive/vector_decimal_1.q (revision 0) +++ ql/src/test/queries/clientpositive/vector_decimal_1.q (working copy) @@ -0,0 +1,58 @@ +SET hive.vectorized.execution.enabled=true; +set hive.fetch.task.conversion=minimal; + +drop table if exists decimal_1; + +create table decimal_1 (t decimal(4,2), u decimal(5), v decimal) stored as orc; + +desc decimal_1; + +insert overwrite table decimal_1 + select cast('17.29' as decimal(4,2)), 3.1415926BD, 3115926.54321BD from src tablesample (1 rows); + +explain +select cast(t as boolean) from decimal_1 order by t; + +select cast(t as boolean) from decimal_1 order by t; + +explain +select cast(t as tinyint) from decimal_1 order by t; + +select cast(t as tinyint) from decimal_1 order by t; + +explain +select cast(t as smallint) from decimal_1 order by t; + +select cast(t as smallint) from decimal_1 order by t; + +explain +select cast(t as int) from decimal_1 order by t; + +select cast(t as int) from decimal_1 order by t; + +explain +select cast(t as bigint) from decimal_1 order by t; + +select cast(t as bigint) from decimal_1 order by t; + +explain +select cast(t as float) from decimal_1 order by t; + +select cast(t as float) from decimal_1 order by t; + +explain +select cast(t as double) from decimal_1 order by t; + +select cast(t as double) from decimal_1 order by t; + +explain +select cast(t as string) from decimal_1 order by t; + +select cast(t as string) from decimal_1 order by t; + +explain +select cast(t as timestamp) from decimal_1 order by t; + +select cast(t as timestamp) from decimal_1 order by t; + +drop table decimal_1; \ No newline at end of file Index: ql/src/test/queries/clientpositive/vector_decimal_10_0.q =================================================================== --- ql/src/test/queries/clientpositive/vector_decimal_10_0.q (revision 0) +++ ql/src/test/queries/clientpositive/vector_decimal_10_0.q (working copy) @@ -0,0 +1,19 @@ +SET hive.vectorized.execution.enabled=true; +set hive.fetch.task.conversion=minimal; + +DROP TABLE IF EXISTS decimal_txt; +DROP TABLE IF EXISTS decimal; + +CREATE TABLE decimal_txt (dec decimal); + +LOAD DATA LOCAL INPATH '../../data/files/decimal_10_0.txt' OVERWRITE INTO TABLE decimal_txt; + +CREATE TABLE DECIMAL STORED AS ORC AS SELECT * FROM decimal_txt; + +EXPLAIN +SELECT dec FROM DECIMAL order by dec; + +SELECT dec FROM DECIMAL order by dec; + +DROP TABLE DECIMAL_txt; +DROP TABLE DECIMAL; \ No newline at end of file Index: ql/src/test/queries/clientpositive/vector_decimal_2.q =================================================================== --- ql/src/test/queries/clientpositive/vector_decimal_2.q (revision 0) +++ ql/src/test/queries/clientpositive/vector_decimal_2.q (working copy) @@ -0,0 +1,146 @@ +SET hive.vectorized.execution.enabled=true; +set hive.fetch.task.conversion=minimal; + +drop table decimal_2; + +create table decimal_2 (t decimal(18,9)) stored as orc; + +insert overwrite table decimal_2 + select cast('17.29' as decimal(4,2)) from src tablesample (1 rows); + +explain 
+select cast(t as boolean) from decimal_2 order by t; + +select cast(t as boolean) from decimal_2 order by t; + +explain +select cast(t as tinyint) from decimal_2 order by t; + +select cast(t as tinyint) from decimal_2 order by t; + +explain +select cast(t as smallint) from decimal_2 order by t; + +select cast(t as smallint) from decimal_2 order by t; + +explain +select cast(t as int) from decimal_2 order by t; + +select cast(t as int) from decimal_2 order by t; + +explain +select cast(t as bigint) from decimal_2 order by t; + +select cast(t as bigint) from decimal_2 order by t; + +explain +select cast(t as float) from decimal_2 order by t; + +select cast(t as float) from decimal_2 order by t; + +explain +select cast(t as double) from decimal_2 order by t; + +select cast(t as double) from decimal_2 order by t; + +explain +select cast(t as string) from decimal_2 order by t; + +select cast(t as string) from decimal_2 order by t; + +insert overwrite table decimal_2 + select cast('3404045.5044003' as decimal(18,9)) from src tablesample (1 rows); + +explain +select cast(t as boolean) from decimal_2 order by t; + +select cast(t as boolean) from decimal_2 order by t; + +explain +select cast(t as tinyint) from decimal_2 order by t; + +select cast(t as tinyint) from decimal_2 order by t; + +explain +select cast(t as smallint) from decimal_2 order by t; + +select cast(t as smallint) from decimal_2 order by t; + +explain +select cast(t as int) from decimal_2 order by t; + +select cast(t as int) from decimal_2 order by t; + +explain +select cast(t as bigint) from decimal_2 order by t; + +select cast(t as bigint) from decimal_2 order by t; + +explain +select cast(t as float) from decimal_2 order by t; + +select cast(t as float) from decimal_2 order by t; + +explain +select cast(t as double) from decimal_2 order by t; + +select cast(t as double) from decimal_2 order by t; + +explain +select cast(t as string) from decimal_2 order by t; + +select cast(t as string) from decimal_2 order by t; + +explain +select cast(3.14 as decimal(4,2)) as c from decimal_2 order by c; + +select cast(3.14 as decimal(4,2)) as c from decimal_2 order by c; + +explain +select cast(cast(3.14 as float) as decimal(4,2)) as c from decimal_2 order by c; + +select cast(cast(3.14 as float) as decimal(4,2)) as c from decimal_2 order by c; + +explain +select cast(cast('2012-12-19 11:12:19.1234567' as timestamp) as decimal(30,8)) as c from decimal_2 order by c; + +select cast(cast('2012-12-19 11:12:19.1234567' as timestamp) as decimal(30,8)) as c from decimal_2 order by c; + +explain +select cast(true as decimal) as c from decimal_2 order by c; + +explain +select cast(true as decimal) as c from decimal_2 order by c; + +select cast(true as decimal) as c from decimal_2 order by c; + +explain +select cast(3Y as decimal) as c from decimal_2 order by c; + +select cast(3Y as decimal) as c from decimal_2 order by c; + +explain +select cast(3S as decimal) as c from decimal_2 order by c; + +select cast(3S as decimal) as c from decimal_2 order by c; + +explain +select cast(cast(3 as int) as decimal) as c from decimal_2 order by c; + +select cast(cast(3 as int) as decimal) as c from decimal_2 order by c; + +explain +select cast(3L as decimal) as c from decimal_2 order by c; + +select cast(3L as decimal) as c from decimal_2 order by c; + +explain +select cast(0.99999999999999999999 as decimal(20,19)) as c from decimal_2 order by c; + +select cast(0.99999999999999999999 as decimal(20,19)) as c from decimal_2 order by c; + +explain +select 
cast('0.99999999999999999999' as decimal(20,20)) as c from decimal_2 order by c; + +select cast('0.99999999999999999999' as decimal(20,20)) as c from decimal_2 order by c; +drop table decimal_2; Index: ql/src/test/queries/clientpositive/vector_decimal_3.q =================================================================== --- ql/src/test/queries/clientpositive/vector_decimal_3.q (revision 0) +++ ql/src/test/queries/clientpositive/vector_decimal_3.q (working copy) @@ -0,0 +1,35 @@ +SET hive.vectorized.execution.enabled=true; +set hive.fetch.task.conversion=minimal; + +DROP TABLE IF EXISTS DECIMAL_3_txt; +DROP TABLE IF EXISTS DECIMAL_3; + +CREATE TABLE DECIMAL_3_txt(key decimal(38,18), value int) +ROW FORMAT DELIMITED + FIELDS TERMINATED BY ' ' +STORED AS TEXTFILE; + +LOAD DATA LOCAL INPATH '../../data/files/kv7.txt' INTO TABLE DECIMAL_3_txt; + +CREATE TABLE DECIMAL_3 STORED AS ORC AS SELECT * FROM DECIMAL_3_txt; + +SELECT * FROM DECIMAL_3 ORDER BY key, value; + +SELECT * FROM DECIMAL_3 ORDER BY key DESC, value DESC; + +SELECT * FROM DECIMAL_3 ORDER BY key, value; + +SELECT DISTINCT key FROM DECIMAL_3 ORDER BY key; + +SELECT key, sum(value) FROM DECIMAL_3 GROUP BY key ORDER BY key; + +SELECT value, sum(key) FROM DECIMAL_3 GROUP BY value ORDER BY value; + +SELECT * FROM DECIMAL_3 a JOIN DECIMAL_3 b ON (a.key = b.key) ORDER BY a.key, a.value, b.value; + +SELECT * FROM DECIMAL_3 WHERE key=3.14 ORDER BY key, value; + +SELECT * FROM DECIMAL_3 WHERE key=3.140 ORDER BY key, value; + +DROP TABLE DECIMAL_3_txt; +DROP TABLE DECIMAL_3; Index: ql/src/test/queries/clientpositive/vector_decimal_4.q =================================================================== --- ql/src/test/queries/clientpositive/vector_decimal_4.q (revision 0) +++ ql/src/test/queries/clientpositive/vector_decimal_4.q (working copy) @@ -0,0 +1,28 @@ +SET hive.vectorized.execution.enabled=true; +set hive.fetch.task.conversion=minimal; + +DROP TABLE IF EXISTS DECIMAL_4_1; +DROP TABLE IF EXISTS DECIMAL_4_2; + +CREATE TABLE DECIMAL_4_1(key decimal(35,25), value int) +ROW FORMAT DELIMITED + FIELDS TERMINATED BY ' ' +STORED AS TEXTFILE; + +CREATE TABLE DECIMAL_4_2(key decimal(35,25), value decimal(35,25)) +STORED AS ORC; + +LOAD DATA LOCAL INPATH '../../data/files/kv7.txt' INTO TABLE DECIMAL_4_1; + +INSERT OVERWRITE TABLE DECIMAL_4_2 SELECT key, key * 3 FROM DECIMAL_4_1; + +SELECT * FROM DECIMAL_4_1 ORDER BY key, value; + +SELECT * FROM DECIMAL_4_2 ORDER BY key, value; + +SELECT * FROM DECIMAL_4_2 ORDER BY key; + +SELECT * FROM DECIMAL_4_2 ORDER BY key, value; + +DROP TABLE DECIMAL_4_1; +DROP TABLE DECIMAL_4_2; Index: ql/src/test/queries/clientpositive/vector_decimal_5.q =================================================================== --- ql/src/test/queries/clientpositive/vector_decimal_5.q (revision 0) +++ ql/src/test/queries/clientpositive/vector_decimal_5.q (working copy) @@ -0,0 +1,28 @@ +SET hive.vectorized.execution.enabled=true; +set hive.fetch.task.conversion=minimal; + +DROP TABLE IF EXISTS DECIMAL_5_txt; +DROP TABLE IF EXISTS DECIMAL_5; + +CREATE TABLE DECIMAL_5_txt(key decimal(10,5), value int) +ROW FORMAT DELIMITED + FIELDS TERMINATED BY ' ' +STORED AS TEXTFILE; + +LOAD DATA LOCAL INPATH '../../data/files/kv7.txt' INTO TABLE DECIMAL_5_txt; + +CREATE TABLE DECIMAL_5(key decimal(10,5), value int) +STORED AS ORC; + +INSERT OVERWRITE TABLE DECIMAL_5 SELECT * FROM DECIMAL_5_txt; + +SELECT key FROM DECIMAL_5 ORDER BY key; + +SELECT DISTINCT key FROM DECIMAL_5 ORDER BY key; + +SELECT cast(key as decimal) FROM DECIMAL_5; + 
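(Worked sketch, not part of the patch: the narrowing cast below reduces both the precision and the scale of a decimal(10,5) key. Assuming Hive's usual decimal semantics, trimming scale rounds the value, while a value whose integer digits no longer fit in the target precision becomes NULL rather than raising an error.)

-- Hypothetical standalone examples of decimal narrowing:
SELECT cast(1.23456BD as decimal(6,3));   -- scale trimmed by rounding: 1.235
SELECT cast(12345.678BD as decimal(6,3)); -- needs 5 integer digits, only 3 fit: NULL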
+SELECT cast(key as decimal(6,3)) FROM DECIMAL_5; + +DROP TABLE DECIMAL_5_txt; +DROP TABLE DECIMAL_5; \ No newline at end of file Index: ql/src/test/queries/clientpositive/vector_decimal_6.q =================================================================== --- ql/src/test/queries/clientpositive/vector_decimal_6.q (revision 0) +++ ql/src/test/queries/clientpositive/vector_decimal_6.q (working copy) @@ -0,0 +1,48 @@ +SET hive.vectorized.execution.enabled=true; +set hive.fetch.task.conversion=minimal; + +DROP TABLE IF EXISTS DECIMAL_6_1_txt; +DROP TABLE IF EXISTS DECIMAL_6_1; +DROP TABLE IF EXISTS DECIMAL_6_2_txt; +DROP TABLE IF EXISTS DECIMAL_6_2; +DROP TABLE IF EXISTS DECIMAL_6_3_txt; +DROP TABLE IF EXISTS DECIMAL_6_3; + +CREATE TABLE DECIMAL_6_1_txt(key decimal(10,5), value int) +ROW FORMAT DELIMITED + FIELDS TERMINATED BY ' ' +STORED AS TEXTFILE; + +CREATE TABLE DECIMAL_6_2_txt(key decimal(17,4), value int) +ROW FORMAT DELIMITED + FIELDS TERMINATED BY ' ' +STORED AS TEXTFILE; + +LOAD DATA LOCAL INPATH '../../data/files/kv9.txt' INTO TABLE DECIMAL_6_1_txt; +LOAD DATA LOCAL INPATH '../../data/files/kv9.txt' INTO TABLE DECIMAL_6_2_txt; + +CREATE TABLE DECIMAL_6_1(key decimal(10,5), value int) +STORED AS ORC; + +CREATE TABLE DECIMAL_6_2(key decimal(17,4), value int) +STORED AS ORC; + +INSERT OVERWRITE TABLE DECIMAL_6_1 SELECT * FROM DECIMAL_6_1_txt; +INSERT OVERWRITE TABLE DECIMAL_6_2 SELECT * FROM DECIMAL_6_2_txt; + +SELECT * FROM DECIMAL_6_1 ORDER BY key, value; + +SELECT * FROM DECIMAL_6_2 ORDER BY key, value; + +SELECT T.key from ( + SELECT key, value from DECIMAL_6_1 + UNION ALL + SELECT key, value from DECIMAL_6_2 +) T order by T.key; + +CREATE TABLE DECIMAL_6_3 STORED AS ORC AS SELECT key + 5.5 AS k, value * 11 AS v from DECIMAL_6_1 ORDER BY v; + +desc DECIMAL_6_3; + +SELECT * FROM DECIMAL_6_3 ORDER BY k, v; + Index: ql/src/test/queries/clientpositive/vector_decimal_precision.q =================================================================== --- ql/src/test/queries/clientpositive/vector_decimal_precision.q (revision 0) +++ ql/src/test/queries/clientpositive/vector_decimal_precision.q (working copy) @@ -0,0 +1,38 @@ +SET hive.vectorized.execution.enabled=true; +set hive.fetch.task.conversion=minimal; + +DROP TABLE IF EXISTS DECIMAL_PRECISION_txt; +DROP TABLE IF EXISTS DECIMAL_PRECISION; + +CREATE TABLE DECIMAL_PRECISION_txt(dec decimal(20,10)) +ROW FORMAT DELIMITED + FIELDS TERMINATED BY ' ' +STORED AS TEXTFILE; + +LOAD DATA LOCAL INPATH '../../data/files/kv8.txt' INTO TABLE DECIMAL_PRECISION_txt; + +CREATE TABLE DECIMAL_PRECISION(dec decimal(20,10)) +STORED AS ORC; + +INSERT OVERWRITE TABLE DECIMAL_PRECISION SELECT * FROM DECIMAL_PRECISION_txt; + +SELECT * FROM DECIMAL_PRECISION ORDER BY dec; + +SELECT dec, dec + 1, dec - 1 FROM DECIMAL_PRECISION ORDER BY dec; +SELECT dec, dec * 2, dec / 3 FROM DECIMAL_PRECISION ORDER BY dec; +SELECT dec, dec / 9 FROM DECIMAL_PRECISION ORDER BY dec; +SELECT dec, dec / 27 FROM DECIMAL_PRECISION ORDER BY dec; +SELECT dec, dec * dec FROM DECIMAL_PRECISION ORDER BY dec; + +EXPLAIN SELECT avg(dec), sum(dec) FROM DECIMAL_PRECISION; +SELECT avg(dec), sum(dec) FROM DECIMAL_PRECISION; + +SELECT dec * cast('12345678901234567890.12345678' as decimal(38,18)) FROM DECIMAL_PRECISION LIMIT 1; +SELECT * from DECIMAL_PRECISION WHERE dec > cast('1234567890123456789012345678.12345678' as decimal(38,18)) LIMIT 1; +SELECT dec * 12345678901234567890.12345678 FROM DECIMAL_PRECISION LIMIT 1; + +SELECT MIN(cast('12345678901234567890.12345678' as decimal(38,18))) FROM 
DECIMAL_PRECISION; +SELECT COUNT(cast('12345678901234567890.12345678' as decimal(38,18))) FROM DECIMAL_PRECISION; + +DROP TABLE DECIMAL_PRECISION_txt; +DROP TABLE DECIMAL_PRECISION; Index: ql/src/test/queries/clientpositive/vector_decimal_round.q =================================================================== --- ql/src/test/queries/clientpositive/vector_decimal_round.q (revision 0) +++ ql/src/test/queries/clientpositive/vector_decimal_round.q (working copy) @@ -0,0 +1,55 @@ +SET hive.vectorized.execution.enabled=true; +set hive.fetch.task.conversion=minimal; + +create table decimal_tbl_txt (dec decimal(10,0)) +ROW FORMAT DELIMITED + FIELDS TERMINATED BY ' ' +STORED AS TEXTFILE; + +insert into table decimal_tbl_txt values(101); + +select * from decimal_tbl_txt; + +explain +select dec, round(dec, -1) from decimal_tbl_txt order by dec; + +select dec, round(dec, -1) from decimal_tbl_txt order by dec; + +explain +select dec, round(dec, -1) from decimal_tbl_txt order by round(dec, -1); + +select dec, round(dec, -1) from decimal_tbl_txt order by round(dec, -1); + +create table decimal_tbl_rc (dec decimal(10,0)) +row format serde 'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe' stored as rcfile; + +insert into table decimal_tbl_rc values(101); + +select * from decimal_tbl_rc; + +explain +select dec, round(dec, -1) from decimal_tbl_rc order by dec; + +select dec, round(dec, -1) from decimal_tbl_rc order by dec; + +explain +select dec, round(dec, -1) from decimal_tbl_rc order by round(dec, -1); + +select dec, round(dec, -1) from decimal_tbl_rc order by round(dec, -1); + +create table decimal_tbl_orc (dec decimal(10,0)) +stored as orc; + +insert into table decimal_tbl_orc values(101); + +select * from decimal_tbl_orc; + +explain +select dec, round(dec, -1) from decimal_tbl_orc order by dec; + +select dec, round(dec, -1) from decimal_tbl_orc order by dec; + +explain +select dec, round(dec, -1) from decimal_tbl_orc order by round(dec, -1); + +select dec, round(dec, -1) from decimal_tbl_orc order by round(dec, -1); \ No newline at end of file Index: ql/src/test/queries/clientpositive/vector_decimal_round_2.q =================================================================== --- ql/src/test/queries/clientpositive/vector_decimal_round_2.q (revision 0) +++ ql/src/test/queries/clientpositive/vector_decimal_round_2.q (working copy) @@ -0,0 +1,119 @@ +SET hive.vectorized.execution.enabled=true; +set hive.fetch.task.conversion=minimal; + +create table decimal_tbl_1_orc (dec decimal(38,18)) +STORED AS ORC; + +insert into table decimal_tbl_1_orc values(55555); + +select * from decimal_tbl_1_orc; + +-- EXPLAIN +-- SELECT dec, round(null), round(null, 0), round(125, null), +-- round(1.0/0.0, 0), round(power(-1.0,0.5), 0) +-- FROM decimal_tbl_1_orc ORDER BY dec; + +-- SELECT dec, round(null), round(null, 0), round(125, null), +-- round(1.0/0.0, 0), round(power(-1.0,0.5), 0) +-- FROM decimal_tbl_1_orc ORDER BY dec; + +EXPLAIN +SELECT + round(dec) as d, round(dec, 0), round(dec, 1), round(dec, 2), round(dec, 3), + round(dec, -1), round(dec, -2), round(dec, -3), round(dec, -4), + round(dec, -5), round(dec, -6), round(dec, -7), round(dec, -8) +FROM decimal_tbl_1_orc ORDER BY d; + +SELECT + round(dec) as d, round(dec, 0), round(dec, 1), round(dec, 2), round(dec, 3), + round(dec, -1), round(dec, -2), round(dec, -3), round(dec, -4), + round(dec, -5), round(dec, -6), round(dec, -7), round(dec, -8) +FROM decimal_tbl_1_orc ORDER BY d; + +create table decimal_tbl_2_orc (pos decimal(38,18), neg 
decimal(38,18)) +STORED AS ORC; + +insert into table decimal_tbl_2_orc values(125.315, -125.315); + +select * from decimal_tbl_2_orc; + +EXPLAIN +SELECT + round(pos) as p, round(pos, 0), + round(pos, 1), round(pos, 2), round(pos, 3), round(pos, 4), + round(pos, -1), round(pos, -2), round(pos, -3), round(pos, -4), + round(neg), round(neg, 0), + round(neg, 1), round(neg, 2), round(neg, 3), round(neg, 4), + round(neg, -1), round(neg, -2), round(neg, -3), round(neg, -4) +FROM decimal_tbl_2_orc ORDER BY p; + +SELECT + round(pos) as p, round(pos, 0), + round(pos, 1), round(pos, 2), round(pos, 3), round(pos, 4), + round(pos, -1), round(pos, -2), round(pos, -3), round(pos, -4), + round(neg), round(neg, 0), + round(neg, 1), round(neg, 2), round(neg, 3), round(neg, 4), + round(neg, -1), round(neg, -2), round(neg, -3), round(neg, -4) +FROM decimal_tbl_2_orc ORDER BY p; + +create table decimal_tbl_3_orc (dec decimal(38,18)) +STORED AS ORC; + +insert into table decimal_tbl_3_orc values(3.141592653589793); + +select * from decimal_tbl_3_orc; + +EXPLAIN +SELECT + round(dec, -15) as d, round(dec, -16), + round(dec, -13), round(dec, -14), + round(dec, -11), round(dec, -12), + round(dec, -9), round(dec, -10), + round(dec, -7), round(dec, -8), + round(dec, -5), round(dec, -6), + round(dec, -3), round(dec, -4), + round(dec, -1), round(dec, -2), + round(dec, 0), round(dec, 1), + round(dec, 2), round(dec, 3), + round(dec, 4), round(dec, 5), + round(dec, 6), round(dec, 7), + round(dec, 8), round(dec, 9), + round(dec, 10), round(dec, 11), + round(dec, 12), round(dec, 13), + round(dec, 13), round(dec, 14), + round(dec, 15), round(dec, 16) +FROM decimal_tbl_3_orc ORDER BY d; + +SELECT + round(dec, -15) as d, round(dec, -16), + round(dec, -13), round(dec, -14), + round(dec, -11), round(dec, -12), + round(dec, -9), round(dec, -10), + round(dec, -7), round(dec, -8), + round(dec, -5), round(dec, -6), + round(dec, -3), round(dec, -4), + round(dec, -1), round(dec, -2), + round(dec, 0), round(dec, 1), + round(dec, 2), round(dec, 3), + round(dec, 4), round(dec, 5), + round(dec, 6), round(dec, 7), + round(dec, 8), round(dec, 9), + round(dec, 10), round(dec, 11), + round(dec, 12), round(dec, 13), + round(dec, 13), round(dec, 14), + round(dec, 15), round(dec, 16) +FROM decimal_tbl_3_orc ORDER BY d; + +create table decimal_tbl_4_orc (pos decimal(38,18), neg decimal(38,18)) +STORED AS ORC; + +insert into table decimal_tbl_4_orc values(1809242.3151111344, -1809242.3151111344); + +select * from decimal_tbl_4_orc; + +EXPLAIN +SELECT round(pos, 9) as p, round(neg, 9), round(1809242.3151111344BD, 9), round(-1809242.3151111344BD, 9) +FROM decimal_tbl_4_orc ORDER BY p; + +SELECT round(pos, 9) as p, round(neg, 9), round(1809242.3151111344BD, 9), round(-1809242.3151111344BD, 9) +FROM decimal_tbl_4_orc ORDER BY p; Index: ql/src/test/queries/clientpositive/vector_decimal_trailing.q =================================================================== --- ql/src/test/queries/clientpositive/vector_decimal_trailing.q (revision 0) +++ ql/src/test/queries/clientpositive/vector_decimal_trailing.q (working copy) @@ -0,0 +1,30 @@ +SET hive.vectorized.execution.enabled=true; +set hive.fetch.task.conversion=minimal; + +DROP TABLE IF EXISTS DECIMAL_TRAILING_txt; +DROP TABLE IF EXISTS DECIMAL_TRAILING; + +CREATE TABLE DECIMAL_TRAILING_txt ( + id int, + a decimal(10,4), + b decimal(15,8) + ) +ROW FORMAT DELIMITED + FIELDS TERMINATED BY ',' +STORED AS TEXTFILE; + +LOAD DATA LOCAL INPATH '../../data/files/kv10.txt' INTO TABLE DECIMAL_TRAILING_txt; + 
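(Worked sketch, not part of the patch: the vector_decimal_round_2 cases above exercise round() with a negative second argument, which rounds to the left of the decimal point. Assuming Hive's half-up rounding, the table values used above behave as follows.)

-- Hypothetical standalone examples mirroring the test data:
SELECT round(55555BD, -1);    -- 55560: the trailing 5 rounds the tens digit up
SELECT round(55555BD, -3);    -- 56000
SELECT round(125.315BD, -2);  -- 100: tens and units digits are dropped
SELECT round(-125.315BD, 2);  -- -125.32: halves round away from zero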
+CREATE TABLE DECIMAL_TRAILING ( + id int, + a decimal(10,4), + b decimal(15,8) + ) +STORED AS ORC; + +INSERT OVERWRITE TABLE DECIMAL_TRAILING SELECT * FROM DECIMAL_TRAILING_txt; + +SELECT * FROM DECIMAL_TRAILING ORDER BY id; + +DROP TABLE DECIMAL_TRAILING_txt; +DROP TABLE DECIMAL_TRAILING; Index: ql/src/test/queries/clientpositive/vector_decimal_udf.q =================================================================== --- ql/src/test/queries/clientpositive/vector_decimal_udf.q (revision 0) +++ ql/src/test/queries/clientpositive/vector_decimal_udf.q (working copy) @@ -0,0 +1,142 @@ +SET hive.vectorized.execution.enabled=true; +set hive.fetch.task.conversion=minimal; + +DROP TABLE IF EXISTS DECIMAL_UDF_txt; +DROP TABLE IF EXISTS DECIMAL_UDF; + +CREATE TABLE DECIMAL_UDF_txt (key decimal(20,10), value int) +ROW FORMAT DELIMITED + FIELDS TERMINATED BY ' ' +STORED AS TEXTFILE; + +LOAD DATA LOCAL INPATH '../../data/files/kv7.txt' INTO TABLE DECIMAL_UDF_txt; + +CREATE TABLE DECIMAL_UDF (key decimal(20,10), value int) +STORED AS ORC; + +INSERT OVERWRITE TABLE DECIMAL_UDF SELECT * FROM DECIMAL_UDF_txt; + +-- addition +EXPLAIN SELECT key + key FROM DECIMAL_UDF; +SELECT key + key FROM DECIMAL_UDF; + +EXPLAIN SELECT key + value FROM DECIMAL_UDF; +SELECT key + value FROM DECIMAL_UDF; + +EXPLAIN SELECT key + (value/2) FROM DECIMAL_UDF; +SELECT key + (value/2) FROM DECIMAL_UDF; + +EXPLAIN SELECT key + '1.0' FROM DECIMAL_UDF; +SELECT key + '1.0' FROM DECIMAL_UDF; + +-- subtraction +EXPLAIN SELECT key - key FROM DECIMAL_UDF; +SELECT key - key FROM DECIMAL_UDF; + +EXPLAIN SELECT key - value FROM DECIMAL_UDF; +SELECT key - value FROM DECIMAL_UDF; + +EXPLAIN SELECT key - (value/2) FROM DECIMAL_UDF; +SELECT key - (value/2) FROM DECIMAL_UDF; + +EXPLAIN SELECT key - '1.0' FROM DECIMAL_UDF; +SELECT key - '1.0' FROM DECIMAL_UDF; + +-- multiplication +EXPLAIN SELECT key * key FROM DECIMAL_UDF; +SELECT key * key FROM DECIMAL_UDF; + +EXPLAIN SELECT key, value FROM DECIMAL_UDF where key * value > 0; +SELECT key, value FROM DECIMAL_UDF where key * value > 0; + +EXPLAIN SELECT key * value FROM DECIMAL_UDF; +SELECT key * value FROM DECIMAL_UDF; + +EXPLAIN SELECT key * (value/2) FROM DECIMAL_UDF; +SELECT key * (value/2) FROM DECIMAL_UDF; + +EXPLAIN SELECT key * '2.0' FROM DECIMAL_UDF; +SELECT key * '2.0' FROM DECIMAL_UDF; + +-- division +EXPLAIN SELECT key / 0 FROM DECIMAL_UDF limit 1; +SELECT key / 0 FROM DECIMAL_UDF limit 1; + +EXPLAIN SELECT key / NULL FROM DECIMAL_UDF limit 1; +SELECT key / NULL FROM DECIMAL_UDF limit 1; + +EXPLAIN SELECT key / key FROM DECIMAL_UDF WHERE key is not null and key <> 0; +SELECT key / key FROM DECIMAL_UDF WHERE key is not null and key <> 0; + +EXPLAIN SELECT key / value FROM DECIMAL_UDF WHERE value is not null and value <> 0; +SELECT key / value FROM DECIMAL_UDF WHERE value is not null and value <> 0; + +EXPLAIN SELECT key / (value/2) FROM DECIMAL_UDF WHERE value is not null and value <> 0; +SELECT key / (value/2) FROM DECIMAL_UDF WHERE value is not null and value <> 0; + +EXPLAIN SELECT 1 + (key / '2.0') FROM DECIMAL_UDF; +SELECT 1 + (key / '2.0') FROM DECIMAL_UDF; + +-- abs
+EXPLAIN SELECT abs(key) FROM DECIMAL_UDF; +SELECT abs(key) FROM DECIMAL_UDF; + +-- avg +EXPLAIN SELECT value, sum(key) / count(key), avg(key), sum(key) FROM DECIMAL_UDF GROUP BY value ORDER BY value; +SELECT value, sum(key) / count(key), avg(key), sum(key) FROM DECIMAL_UDF GROUP BY value ORDER BY value; + +-- negative +EXPLAIN SELECT -key FROM DECIMAL_UDF; +SELECT -key FROM DECIMAL_UDF; + +-- positive +EXPLAIN
SELECT +key FROM DECIMAL_UDF; +SELECT +key FROM DECIMAL_UDF; + +-- ceiling +EXPLAIN SELECT CEIL(key) FROM DECIMAL_UDF; +SELECT CEIL(key) FROM DECIMAL_UDF; + +-- floor +EXPLAIN SELECT FLOOR(key) FROM DECIMAL_UDF; +SELECT FLOOR(key) FROM DECIMAL_UDF; + +-- round +EXPLAIN SELECT ROUND(key, 2) FROM DECIMAL_UDF; +SELECT ROUND(key, 2) FROM DECIMAL_UDF; + +-- power +EXPLAIN SELECT POWER(key, 2) FROM DECIMAL_UDF; +SELECT POWER(key, 2) FROM DECIMAL_UDF; + +-- modulo +EXPLAIN SELECT (key + 1) % (key / 2) FROM DECIMAL_UDF; +SELECT (key + 1) % (key / 2) FROM DECIMAL_UDF; + +-- stddev, var +EXPLAIN SELECT value, stddev(key), variance(key) FROM DECIMAL_UDF GROUP BY value; +SELECT value, stddev(key), variance(key) FROM DECIMAL_UDF GROUP BY value; + +-- stddev_samp, var_samp +EXPLAIN SELECT value, stddev_samp(key), var_samp(key) FROM DECIMAL_UDF GROUP BY value; +SELECT value, stddev_samp(key), var_samp(key) FROM DECIMAL_UDF GROUP BY value; + +-- histogram +EXPLAIN SELECT histogram_numeric(key, 3) FROM DECIMAL_UDF; +SELECT histogram_numeric(key, 3) FROM DECIMAL_UDF; + +-- min +EXPLAIN SELECT MIN(key) FROM DECIMAL_UDF; +SELECT MIN(key) FROM DECIMAL_UDF; + +-- max +EXPLAIN SELECT MAX(key) FROM DECIMAL_UDF; +SELECT MAX(key) FROM DECIMAL_UDF; + +-- count +EXPLAIN SELECT COUNT(key) FROM DECIMAL_UDF; +SELECT COUNT(key) FROM DECIMAL_UDF; + +DROP TABLE IF EXISTS DECIMAL_UDF_txt; +DROP TABLE IF EXISTS DECIMAL_UDF; + Index: ql/src/test/queries/clientpositive/vector_decimal_udf2.q =================================================================== --- ql/src/test/queries/clientpositive/vector_decimal_udf2.q (revision 0) +++ ql/src/test/queries/clientpositive/vector_decimal_udf2.q (working copy) @@ -0,0 +1,40 @@ +SET hive.vectorized.execution.enabled=true; +set hive.fetch.task.conversion=minimal; + +DROP TABLE IF EXISTS DECIMAL_UDF2_txt; +DROP TABLE IF EXISTS DECIMAL_UDF2; + +CREATE TABLE DECIMAL_UDF2_txt (key decimal(20,10), value int) +ROW FORMAT DELIMITED + FIELDS TERMINATED BY ' ' +STORED AS TEXTFILE; + +LOAD DATA LOCAL INPATH '../../data/files/kv7.txt' INTO TABLE DECIMAL_UDF2_txt; + +CREATE TABLE DECIMAL_UDF2 (key decimal(20,10), value int) +STORED AS ORC; + +INSERT OVERWRITE TABLE DECIMAL_UDF2 SELECT * FROM DECIMAL_UDF2_txt; + +EXPLAIN +SELECT acos(key), asin(key), atan(key), cos(key), sin(key), tan(key), radians(key) +FROM DECIMAL_UDF2 WHERE key = 10; + +SELECT acos(key), asin(key), atan(key), cos(key), sin(key), tan(key), radians(key) +FROM DECIMAL_UDF2 WHERE key = 10; + +EXPLAIN +SELECT + exp(key), ln(key), + log(key), log(key, key), log(key, value), log(value, key), + log10(key), sqrt(key) +FROM DECIMAL_UDF2 WHERE key = 10; + +SELECT + exp(key), ln(key), + log(key), log(key, key), log(key, value), log(value, key), + log10(key), sqrt(key) +FROM DECIMAL_UDF2 WHERE key = 10; + +DROP TABLE IF EXISTS DECIMAL_UDF2_txt; +DROP TABLE IF EXISTS DECIMAL_UDF2; Index: ql/src/test/queries/clientpositive/vector_mapjoin_reduce.q =================================================================== --- ql/src/test/queries/clientpositive/vector_mapjoin_reduce.q (revision 1637277) +++ ql/src/test/queries/clientpositive/vector_mapjoin_reduce.q (working copy) @@ -3,45 +3,6 @@ -- SORT_QUERY_RESULTS -DROP TABLE part; - --- data setup -CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -); - -LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part; - -DROP TABLE lineitem;
-CREATE TABLE lineitem (L_ORDERKEY INT, - L_PARTKEY INT, - L_SUPPKEY INT, - L_LINENUMBER INT, - L_QUANTITY DOUBLE, - L_EXTENDEDPRICE DOUBLE, - L_DISCOUNT DOUBLE, - L_TAX DOUBLE, - L_RETURNFLAG STRING, - L_LINESTATUS STRING, - l_shipdate STRING, - L_COMMITDATE STRING, - L_RECEIPTDATE STRING, - L_SHIPINSTRUCT STRING, - L_SHIPMODE STRING, - L_COMMENT STRING) -ROW FORMAT DELIMITED -FIELDS TERMINATED BY '|'; - -LOAD DATA LOCAL INPATH '../../data/files/lineitem.txt' OVERWRITE INTO TABLE lineitem; - -- Verify HIVE-8097 with a query that has a Vectorized MapJoin in the Reducer. -- Query copied from subquery_in.q Index: ql/src/test/queries/clientpositive/vector_multi_insert.q =================================================================== --- ql/src/test/queries/clientpositive/vector_multi_insert.q (revision 0) +++ ql/src/test/queries/clientpositive/vector_multi_insert.q (working copy) @@ -0,0 +1,34 @@ +SET hive.vectorized.execution.enabled=true; +set hive.fetch.task.conversion=minimal; + +create table orc1 + stored as orc + tblproperties("orc.compress"="ZLIB") + as + select rn + from + ( + select cast(1 as int) as rn from src limit 1 + union all + select cast(100 as int) as rn from src limit 1 + union all + select cast(10000 as int) as rn from src limit 1 + ) t; + +create table orc_rn1 (rn int); +create table orc_rn2 (rn int); +create table orc_rn3 (rn int); + +explain from orc1 a +insert overwrite table orc_rn1 select a.* where a.rn < 100 +insert overwrite table orc_rn2 select a.* where a.rn >= 100 and a.rn < 1000 +insert overwrite table orc_rn3 select a.* where a.rn >= 1000; + +from orc1 a +insert overwrite table orc_rn1 select a.* where a.rn < 100 +insert overwrite table orc_rn2 select a.* where a.rn >= 100 and a.rn < 1000 +insert overwrite table orc_rn3 select a.* where a.rn >= 1000; + +select * from orc_rn1; +select * from orc_rn2; +select * from orc_rn3; Index: ql/src/test/queries/clientpositive/vectorized_ptf.q =================================================================== --- ql/src/test/queries/clientpositive/vectorized_ptf.q (revision 1637277) +++ ql/src/test/queries/clientpositive/vectorized_ptf.q (working copy) @@ -1,7 +1,7 @@ SET hive.vectorized.execution.enabled=true; DROP TABLE part_staging; -DROP TABLE part; +DROP TABLE part_orc; -- NOTE: This test is a copy of ptf. -- NOTE: We cannot vectorize "pure" table functions (e.g. NOOP) -- given their blackbox nature. So only queries without table functions and @@ -22,7 +22,7 @@ LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part_staging; -CREATE TABLE part( +CREATE TABLE part_orc( p_partkey INT, p_name STRING, p_mfgr STRING, @@ -34,9 +34,9 @@ p_comment STRING ) STORED AS ORC; -DESCRIBE EXTENDED part; +DESCRIBE EXTENDED part_orc; -insert into table part select * from part_staging; +insert into table part_orc select * from part_staging; --1. 
test1 @@ -45,7 +45,7 @@ rank() over (partition by p_mfgr order by p_name) as r, dense_rank() over (partition by p_mfgr order by p_name) as dr, sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row) as s1 -from noop(on part +from noop(on part_orc partition by p_mfgr order by p_name ); @@ -54,7 +54,7 @@ rank() over (partition by p_mfgr order by p_name) as r, dense_rank() over (partition by p_mfgr order by p_name) as dr, sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row) as s1 -from noop(on part +from noop(on part_orc partition by p_mfgr order by p_name ); @@ -64,14 +64,14 @@ explain extended select p_mfgr, p_name, p_size, p_size - lag(p_size,1,p_size) over (partition by p_mfgr order by p_name) as deltaSz -from noop (on (select p1.* from part p1 join part p2 on p1.p_partkey = p2.p_partkey) j +from noop (on (select p1.* from part_orc p1 join part_orc p2 on p1.p_partkey = p2.p_partkey) j distribute by j.p_mfgr sort by j.p_name) ; select p_mfgr, p_name, p_size, p_size - lag(p_size,1,p_size) over (partition by p_mfgr order by p_name) as deltaSz -from noop (on (select p1.* from part p1 join part p2 on p1.p_partkey = p2.p_partkey) j +from noop (on (select p1.* from part_orc p1 join part_orc p2 on p1.p_partkey = p2.p_partkey) j distribute by j.p_mfgr sort by j.p_name) ; @@ -80,12 +80,12 @@ explain extended select p_mfgr, p_name, p_size -from noop(on part +from noop(on part_orc partition by p_mfgr order by p_name); select p_mfgr, p_name, p_size -from noop(on part +from noop(on part_orc partition by p_mfgr order by p_name); @@ -96,7 +96,7 @@ rank() over (partition by p_mfgr order by p_name) as r, dense_rank() over (partition by p_mfgr order by p_name) as dr, sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row) as s1 -from noop(on part +from noop(on part_orc partition by p_mfgr order by p_name ) abc; @@ -105,7 +105,7 @@ rank() over (partition by p_mfgr order by p_name) as r, dense_rank() over (partition by p_mfgr order by p_name) as dr, sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row) as s1 -from noop(on part +from noop(on part_orc partition by p_mfgr order by p_name ) abc; @@ -117,7 +117,7 @@ rank() over (partition by p_mfgr order by p_name) as r, dense_rank() over (partition by p_mfgr order by p_name) as dr, p_size, p_size - lag(p_size,1,p_size) over (partition by p_mfgr order by p_name) as deltaSz -from noop(on part +from noop(on part_orc partition by p_mfgr order by p_name ) @@ -127,7 +127,7 @@ rank() over (partition by p_mfgr order by p_name) as r, dense_rank() over (partition by p_mfgr order by p_name) as dr, p_size, p_size - lag(p_size,1,p_size) over (partition by p_mfgr order by p_name) as deltaSz -from noop(on part +from noop(on part_orc partition by p_mfgr order by p_name ) @@ -140,7 +140,7 @@ rank() over (partition by p_mfgr order by p_name) as r, dense_rank() over (partition by p_mfgr order by p_name) as dr, p_size, p_size - lag(p_size,1,p_size) over (partition by p_mfgr order by p_name) as deltaSz -from noop(on part +from noop(on part_orc partition by p_mfgr order by p_name ) @@ -151,7 +151,7 @@ rank() over (partition by p_mfgr order by p_name) as r, dense_rank() over (partition by p_mfgr order by p_name) as dr, p_size, p_size - lag(p_size,1,p_size) over (partition by p_mfgr order by p_name) as deltaSz -from noop(on part +from noop(on part_orc partition by 
p_mfgr order by p_name ) @@ -162,28 +162,28 @@ explain extended select abc.* -from noop(on part +from noop(on part_orc partition by p_mfgr order by p_name -) abc join part p1 on abc.p_partkey = p1.p_partkey; +) abc join part_orc p1 on abc.p_partkey = p1.p_partkey; select abc.* -from noop(on part +from noop(on part_orc partition by p_mfgr order by p_name -) abc join part p1 on abc.p_partkey = p1.p_partkey; +) abc join part_orc p1 on abc.p_partkey = p1.p_partkey; -- 8. testJoinRight explain extended select abc.* -from part p1 join noop(on part +from part_orc p1 join noop(on part_orc partition by p_mfgr order by p_name ) abc on abc.p_partkey = p1.p_partkey; select abc.* -from part p1 join noop(on part +from part_orc p1 join noop(on part_orc partition by p_mfgr order by p_name ) abc on abc.p_partkey = p1.p_partkey; @@ -193,13 +193,13 @@ explain extended select p_mfgr, p_name, p_size, rank() over (partition by p_mfgr order by p_name, p_size desc) as r -from noopwithmap(on part +from noopwithmap(on part_orc partition by p_mfgr order by p_name, p_size desc); select p_mfgr, p_name, p_size, rank() over (partition by p_mfgr order by p_name, p_size desc) as r -from noopwithmap(on part +from noopwithmap(on part_orc partition by p_mfgr order by p_name, p_size desc); @@ -210,7 +210,7 @@ rank() over (partition by p_mfgr order by p_name) as r, dense_rank() over (partition by p_mfgr order by p_name) as dr, sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row) as s1 -from noopwithmap(on part +from noopwithmap(on part_orc partition by p_mfgr order by p_name); @@ -218,7 +218,7 @@ rank() over (partition by p_mfgr order by p_name) as r, dense_rank() over (partition by p_mfgr order by p_name) as dr, sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row) as s1 -from noopwithmap(on part +from noopwithmap(on part_orc partition by p_mfgr order by p_name); @@ -229,7 +229,7 @@ rank() over (partition by p_mfgr order by p_name) as r, dense_rank() over (partition by p_mfgr order by p_name) as dr, sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row) as s1 -from noop(on part +from noop(on part_orc partition by p_mfgr order by p_name) ; @@ -238,7 +238,7 @@ rank() over (partition by p_mfgr order by p_name) as r, dense_rank() over (partition by p_mfgr order by p_name) as dr, sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row) as s1 -from noop(on part +from noop(on part_orc partition by p_mfgr order by p_name) ; @@ -250,7 +250,7 @@ rank() over (partition by p_mfgr order by p_name) as r, dense_rank() over (partition by p_mfgr order by p_name) as dr, sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row) as s1 -from noop(on noopwithmap(on noop(on part +from noop(on noopwithmap(on noop(on part_orc partition by p_mfgr order by p_mfgr, p_name ))); @@ -259,7 +259,7 @@ rank() over (partition by p_mfgr order by p_name) as r, dense_rank() over (partition by p_mfgr order by p_name) as dr, sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row) as s1 -from noop(on noopwithmap(on noop(on part +from noop(on noopwithmap(on noop(on part_orc partition by p_mfgr order by p_mfgr, p_name ))); @@ -273,7 +273,7 @@ count(p_size) over (partition by p_mfgr order by p_name) as cd, p_retailprice, sum(p_retailprice) 
over w1 as s1 -from noop(on part +from noop(on part_orc partition by p_mfgr order by p_name) window w1 as (partition by p_mfgr order by p_name rows between 2 preceding and 2 following) @@ -285,7 +285,7 @@ count(p_size) over (partition by p_mfgr order by p_name) as cd, p_retailprice, sum(p_retailprice) over w1 as s1 -from noop(on part +from noop(on part_orc partition by p_mfgr order by p_name) window w1 as (partition by p_mfgr order by p_name rows between 2 preceding and 2 following) @@ -300,10 +300,10 @@ count(abc.p_name) over (distribute by abc.p_mfgr sort by abc.p_name) as cd, abc.p_retailprice, sum(abc.p_retailprice) over (distribute by abc.p_mfgr sort by abc.p_name rows between unbounded preceding and current row) as s1, abc.p_size, abc.p_size - lag(abc.p_size,1,abc.p_size) over (distribute by abc.p_mfgr sort by abc.p_name) as deltaSz -from noop(on part +from noop(on part_orc partition by p_mfgr order by p_name -) abc join part p1 on abc.p_partkey = p1.p_partkey +) abc join part_orc p1 on abc.p_partkey = p1.p_partkey ; select abc.p_mfgr, abc.p_name, @@ -312,22 +312,22 @@ count(abc.p_name) over (distribute by abc.p_mfgr sort by abc.p_name) as cd, abc.p_retailprice, sum(abc.p_retailprice) over (distribute by abc.p_mfgr sort by abc.p_name rows between unbounded preceding and current row) as s1, abc.p_size, abc.p_size - lag(abc.p_size,1,abc.p_size) over (distribute by abc.p_mfgr sort by abc.p_name) as deltaSz -from noop(on part +from noop(on part_orc partition by p_mfgr order by p_name -) abc join part p1 on abc.p_partkey = p1.p_partkey +) abc join part_orc p1 on abc.p_partkey = p1.p_partkey ; -- 15. testDistinctInSelectWithPTF explain extended select DISTINCT p_mfgr, p_name, p_size -from noop(on part +from noop(on part_orc partition by p_mfgr order by p_name); select DISTINCT p_mfgr, p_name, p_size -from noop(on part +from noop(on part_orc partition by p_mfgr order by p_name); @@ -336,7 +336,7 @@ create view IF NOT EXISTS mfgr_price_view as select p_mfgr, p_brand, sum(p_retailprice) as s -from part +from part_orc group by p_mfgr, p_brand; explain extended @@ -374,7 +374,7 @@ fv1 INT); explain extended -from noop(on part +from noop(on part_orc partition by p_mfgr order by p_name) INSERT OVERWRITE TABLE part_4 select p_mfgr, p_name, p_size, @@ -389,7 +389,7 @@ first_value(p_size, true) over w1 as fv1 window w1 as (distribute by p_mfgr sort by p_mfgr, p_name rows between 2 preceding and 2 following); -from noop(on part +from noop(on part_orc partition by p_mfgr order by p_name) INSERT OVERWRITE TABLE part_4 select p_mfgr, p_name, p_size, @@ -418,7 +418,7 @@ from noop(on noopwithmap(on noop(on - noop(on part + noop(on part_orc partition by p_mfgr order by p_mfgr) ) @@ -434,7 +434,7 @@ from noop(on noopwithmap(on noop(on - noop(on part + noop(on part_orc partition by p_mfgr order by p_mfgr) ) @@ -453,7 +453,7 @@ from noop(on noop(on noop(on - noop(on part + noop(on part_orc partition by p_mfgr order by p_mfgr) ) @@ -469,7 +469,7 @@ from noop(on noop(on noop(on - noop(on part + noop(on part_orc partition by p_mfgr order by p_mfgr) ) @@ -488,7 +488,7 @@ from noop(on noop(on noop(on - noop(on part + noop(on part_orc partition by p_mfgr,p_name order by p_mfgr,p_name) ) @@ -502,7 +502,7 @@ from noop(on noop(on noop(on - noop(on part + noop(on part_orc partition by p_mfgr,p_name order by p_mfgr,p_name) ) @@ -520,7 +520,7 @@ from noopwithmap(on noop(on noop(on - noop(on part + noop(on part_orc partition by p_mfgr,p_name order by p_mfgr,p_name) ) @@ -536,7 +536,7 @@ from noopwithmap(on noop(on noop(on 
- noop(on part + noop(on part_orc partition by p_mfgr,p_name order by p_mfgr,p_name) ) @@ -556,7 +556,7 @@ sum(p_size) over (partition by p_mfgr,p_name order by p_mfgr,p_name rows between unbounded preceding and current row) as s2 from noop(on noopwithmap(on - noop(on part + noop(on part_orc partition by p_mfgr, p_name order by p_mfgr, p_name) partition by p_mfgr @@ -571,7 +571,7 @@ sum(p_size) over (partition by p_mfgr,p_name order by p_mfgr,p_name rows between unbounded preceding and current row) as s2 from noop(on noopwithmap(on - noop(on part + noop(on part_orc partition by p_mfgr, p_name order by p_mfgr, p_name) partition by p_mfgr @@ -589,7 +589,7 @@ sum(p_size) over (partition by p_mfgr order by p_name range between unbounded preceding and current row) as s2 from noopwithmap(on noop(on - noop(on part + noop(on part_orc partition by p_mfgr, p_name order by p_mfgr, p_name) )); @@ -602,7 +602,7 @@ sum(p_size) over (partition by p_mfgr order by p_name range between unbounded preceding and current row) as s2 from noopwithmap(on noop(on - noop(on part + noop(on part_orc partition by p_mfgr, p_name order by p_mfgr, p_name) )); Index: ql/src/test/queries/clientpositive/windowing.q =================================================================== --- ql/src/test/queries/clientpositive/windowing.q (revision 1637277) +++ ql/src/test/queries/clientpositive/windowing.q (working copy) @@ -1,20 +1,3 @@ -DROP TABLE part; - --- data setup -CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -); - -LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part; - -- 1. testWindowing select p_mfgr, p_name, p_size, rank() over(distribute by p_mfgr sort by p_name) as r, Index: ql/src/test/queries/clientpositive/windowing_adjust_rowcontainer_sz.q =================================================================== --- ql/src/test/queries/clientpositive/windowing_adjust_rowcontainer_sz.q (revision 1637277) +++ ql/src/test/queries/clientpositive/windowing_adjust_rowcontainer_sz.q (working copy) @@ -1,20 +1,3 @@ -DROP TABLE part; - --- data setup -CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -); - -LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part; - set hive.join.cache.size=1; select p_mfgr, p_name, p_size, Index: ql/src/test/queries/clientpositive/windowing_columnPruning.q =================================================================== --- ql/src/test/queries/clientpositive/windowing_columnPruning.q (revision 1637277) +++ ql/src/test/queries/clientpositive/windowing_columnPruning.q (working copy) @@ -1,20 +1,3 @@ -DROP TABLE part; - --- data setup -CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -); - -LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part; - -- 1. 
testQueryLevelPartitionColsNotInSelect select p_size, sum(p_retailprice) over (distribute by p_mfgr sort by p_name rows between unbounded preceding and current row) as s1 Index: ql/src/test/queries/clientpositive/windowing_decimal.q =================================================================== --- ql/src/test/queries/clientpositive/windowing_decimal.q (revision 1637277) +++ ql/src/test/queries/clientpositive/windowing_decimal.q (working copy) @@ -1,21 +1,3 @@ -DROP TABLE part; - --- data setup -CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -); - -LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part; - - create table part_dec( p_partkey INT, p_name STRING, Index: ql/src/test/queries/clientpositive/windowing_expressions.q =================================================================== --- ql/src/test/queries/clientpositive/windowing_expressions.q (revision 1637277) +++ ql/src/test/queries/clientpositive/windowing_expressions.q (working copy) @@ -1,20 +1,3 @@ -DROP TABLE part; - --- data setup -CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -); - -LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part; - drop table over10k; create table over10k( Index: ql/src/test/queries/clientpositive/windowing_streaming.q =================================================================== --- ql/src/test/queries/clientpositive/windowing_streaming.q (revision 1637277) +++ ql/src/test/queries/clientpositive/windowing_streaming.q (working copy) @@ -1,20 +1,3 @@ -DROP TABLE if exists part; - --- data setup -CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -); - -LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part; - drop table over10k; create table over10k( Index: ql/src/test/results/clientnegative/limit_partition_stats.q.out =================================================================== --- ql/src/test/results/clientnegative/limit_partition_stats.q.out (revision 1637277) +++ ql/src/test/results/clientnegative/limit_partition_stats.q.out (working copy) @@ -2,102 +2,7 @@ PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@part -POSTHOOK: query: create table part (c int) partitioned by (d string) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@part -PREHOOK: query: insert into table part partition (d) -select hr,ds from srcpart -PREHOOK: type: QUERY -PREHOOK: Input: default@srcpart -PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 -PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 -PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 -PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 -PREHOOK: Output: default@part -POSTHOOK: query: insert into table part partition (d) -select hr,ds from srcpart -POSTHOOK: type: QUERY -POSTHOOK: Input: default@srcpart -POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 -POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 -POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 -POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 -POSTHOOK: Output: default@part@d=2008-04-08 -POSTHOOK: Output: 
default@part@d=2008-04-09 -POSTHOOK: Lineage: part PARTITION(d=2008-04-08).c EXPRESSION [(srcpart)srcpart.FieldSchema(name:hr, type:string, comment:null), ] -POSTHOOK: Lineage: part PARTITION(d=2008-04-09).c EXPRESSION [(srcpart)srcpart.FieldSchema(name:hr, type:string, comment:null), ] -PREHOOK: query: explain select count(*) from part -PREHOOK: type: QUERY -POSTHOOK: query: explain select count(*) from part -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-0 is a root stage - -STAGE PLANS: - Stage: Stage-0 - Fetch Operator - limit: 1 - Processor Tree: - ListSink - -PREHOOK: query: select count(*) from part -PREHOOK: type: QUERY -PREHOOK: Input: default@part +FAILED: Hive Internal Error: java.lang.RuntimeException(Cannot overwrite read-only table: part) +java.lang.RuntimeException: Cannot overwrite read-only table: part #### A masked pattern was here #### -POSTHOOK: query: select count(*) from part -POSTHOOK: type: QUERY -POSTHOOK: Input: default@part -#### A masked pattern was here #### -2000 -PREHOOK: query: explain select count(*) from part -PREHOOK: type: QUERY -POSTHOOK: query: explain select count(*) from part -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 -STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: part - Statistics: Num rows: 2000 Data size: 4000 Basic stats: COMPLETE Column stats: NONE - Select Operator - Statistics: Num rows: 2000 Data size: 4000 Basic stats: COMPLETE Column stats: NONE - Group By Operator - aggregations: count() - mode: hash - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - sort order: - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE - value expressions: _col0 (type: bigint) - Reduce Operator Tree: - Group By Operator - aggregations: count(VALUE._col0) - mode: mergepartial - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: _col0 (type: bigint) - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - ListSink - -FAILED: SemanticException Number of partitions scanned (=2) on table part exceeds limit (=1). This is controlled by hive.limit.query.max.table.partition. 
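For context on the expected-output change above: limit_partition_stats.q is a negative test of the query-level partition-scan cap, and with the part table now pre-created as a shared, read-only source table in the test warehouse, the test's setup INSERT fails before the cap is ever evaluated. A minimal sketch of how the cap itself fires, assuming a part table partitioned on d with the two partitions named above:

  set hive.limit.query.max.table.partition=1;
  -- Scanning both d=2008-04-08 and d=2008-04-09 exceeds the limit of 1, so
  -- compilation fails with the SemanticException shown in the old expected output.
  select count(*) from part;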
Index: ql/src/test/results/clientnegative/ptf_negative_AggrFuncsWithNoGBYNoPartDef.q.out =================================================================== --- ql/src/test/results/clientnegative/ptf_negative_AggrFuncsWithNoGBYNoPartDef.q.out (revision 1637277) +++ ql/src/test/results/clientnegative/ptf_negative_AggrFuncsWithNoGBYNoPartDef.q.out (working copy) @@ -1,41 +1 @@ -PREHOOK: query: DROP TABLE part -PREHOOK: type: DROPTABLE -POSTHOOK: query: DROP TABLE part -POSTHOOK: type: DROPTABLE -PREHOOK: query: CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@part -POSTHOOK: query: CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@part -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part -PREHOOK: type: LOAD -#### A masked pattern was here #### -PREHOOK: Output: default@part -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part -POSTHOOK: type: LOAD -#### A masked pattern was here #### -POSTHOOK: Output: default@part -FAILED: SemanticException [Error 10025]: Line 4:7 Expression not in GROUP BY key 'p_mfgr' +FAILED: SemanticException [Error 10025]: Line 2:7 Expression not in GROUP BY key 'p_mfgr' Index: ql/src/test/results/clientnegative/ptf_negative_AmbiguousWindowDefn.q.out =================================================================== --- ql/src/test/results/clientnegative/ptf_negative_AmbiguousWindowDefn.q.out (revision 1637277) +++ ql/src/test/results/clientnegative/ptf_negative_AmbiguousWindowDefn.q.out (working copy) @@ -1,41 +1 @@ -PREHOOK: query: DROP TABLE part -PREHOOK: type: DROPTABLE -POSTHOOK: query: DROP TABLE part -POSTHOOK: type: DROPTABLE -PREHOOK: query: CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@part -POSTHOOK: query: CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@part -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part -PREHOOK: type: LOAD -#### A masked pattern was here #### -PREHOOK: Output: default@part -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part -POSTHOOK: type: LOAD -#### A masked pattern was here #### -POSTHOOK: Output: default@part FAILED: SemanticException Cycle in Window references [w3, w3] Index: ql/src/test/results/clientnegative/ptf_negative_DistributeByOrderBy.q.out =================================================================== --- ql/src/test/results/clientnegative/ptf_negative_DistributeByOrderBy.q.out (revision 1637277) +++ ql/src/test/results/clientnegative/ptf_negative_DistributeByOrderBy.q.out (working copy) @@ -1,34 +1,2 @@ -PREHOOK: query: DROP 
TABLE part -PREHOOK: type: DROPTABLE -POSTHOOK: query: DROP TABLE part -POSTHOOK: type: DROPTABLE -PREHOOK: query: CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@part -POSTHOOK: query: CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@part -FAILED: ParseException line 5:46 missing ) at 'order' near 'p_mfgr' -line 5:61 missing EOF at ')' near 'p_mfgr' +FAILED: ParseException line 3:46 missing ) at 'order' near 'p_mfgr' +line 3:61 missing EOF at ')' near 'p_mfgr' Index: ql/src/test/results/clientnegative/ptf_negative_DuplicateWindowAlias.q.out =================================================================== --- ql/src/test/results/clientnegative/ptf_negative_DuplicateWindowAlias.q.out (revision 1637277) +++ ql/src/test/results/clientnegative/ptf_negative_DuplicateWindowAlias.q.out (working copy) @@ -1,33 +1 @@ -PREHOOK: query: DROP TABLE part -PREHOOK: type: DROPTABLE -POSTHOOK: query: DROP TABLE part -POSTHOOK: type: DROPTABLE -PREHOOK: query: CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@part -POSTHOOK: query: CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@part -FAILED: SemanticException 10:7 Duplicate definition of window w2 is not allowed. Error encountered near token 'w2' +FAILED: SemanticException 8:7 Duplicate definition of window w2 is not allowed. 
Error encountered near token 'w2' Index: ql/src/test/results/clientnegative/ptf_negative_HavingLeadWithNoGBYNoWindowing.q.out =================================================================== --- ql/src/test/results/clientnegative/ptf_negative_HavingLeadWithNoGBYNoWindowing.q.out (revision 1637277) +++ ql/src/test/results/clientnegative/ptf_negative_HavingLeadWithNoGBYNoWindowing.q.out (working copy) @@ -1,33 +1 @@ -PREHOOK: query: DROP TABLE part -PREHOOK: type: DROPTABLE -POSTHOOK: query: DROP TABLE part -POSTHOOK: type: DROPTABLE -PREHOOK: query: CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@part -POSTHOOK: query: CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@part FAILED: SemanticException HAVING specified without GROUP BY Index: ql/src/test/results/clientnegative/ptf_negative_HavingLeadWithPTF.q.out =================================================================== --- ql/src/test/results/clientnegative/ptf_negative_HavingLeadWithPTF.q.out (revision 1637277) +++ ql/src/test/results/clientnegative/ptf_negative_HavingLeadWithPTF.q.out (working copy) @@ -1,33 +1 @@ -PREHOOK: query: DROP TABLE part -PREHOOK: type: DROPTABLE -POSTHOOK: query: DROP TABLE part -POSTHOOK: type: DROPTABLE -PREHOOK: query: CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@part -POSTHOOK: query: CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@part FAILED: SemanticException HAVING specified without GROUP BY Index: ql/src/test/results/clientnegative/ptf_negative_InvalidValueBoundary.q.out =================================================================== --- ql/src/test/results/clientnegative/ptf_negative_InvalidValueBoundary.q.out (revision 1637277) +++ ql/src/test/results/clientnegative/ptf_negative_InvalidValueBoundary.q.out (working copy) @@ -1,36 +1,2 @@ -PREHOOK: query: DROP TABLE part -PREHOOK: type: DROPTABLE -POSTHOOK: query: DROP TABLE part -POSTHOOK: type: DROPTABLE -PREHOOK: query: CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING, - p_complex array -) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@part -POSTHOOK: query: CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING, - p_complex array -) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@part FAILED: SemanticException Failed to breakup Windowing invocations into Groups. 
At least 1 group must only depend on input columns. Also check for circular dependencies. -Underlying error: Value Boundary expression must be of primitve type. Found: array +Underlying error: org.apache.hadoop.hive.ql.parse.SemanticException: Line 6:43 Invalid table alias or column reference 'p_complex': (possible column names are: p_partkey, p_name, p_mfgr, p_brand, p_type, p_size, p_container, p_retailprice, p_comment) Index: ql/src/test/results/clientnegative/ptf_negative_JoinWithAmbigousAlias.q.out =================================================================== --- ql/src/test/results/clientnegative/ptf_negative_JoinWithAmbigousAlias.q.out (revision 1637277) +++ ql/src/test/results/clientnegative/ptf_negative_JoinWithAmbigousAlias.q.out (working copy) @@ -1,33 +1 @@ -PREHOOK: query: DROP TABLE part -PREHOOK: type: DROPTABLE -POSTHOOK: query: DROP TABLE part -POSTHOOK: type: DROPTABLE -PREHOOK: query: CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@part -POSTHOOK: query: CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@part -FAILED: SemanticException [Error 10008]: Line 8:11 Ambiguous table alias 'part' +FAILED: SemanticException [Error 10008]: Line 6:11 Ambiguous table alias 'part' Index: ql/src/test/results/clientnegative/ptf_negative_PartitionBySortBy.q.out =================================================================== --- ql/src/test/results/clientnegative/ptf_negative_PartitionBySortBy.q.out (revision 1637277) +++ ql/src/test/results/clientnegative/ptf_negative_PartitionBySortBy.q.out (working copy) @@ -1,34 +1,2 @@ -PREHOOK: query: DROP TABLE part -PREHOOK: type: DROPTABLE -POSTHOOK: query: DROP TABLE part -POSTHOOK: type: DROPTABLE -PREHOOK: query: CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@part -POSTHOOK: query: CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@part -FAILED: ParseException line 5:45 missing ) at 'sort' near 'p_mfgr' -line 5:59 missing EOF at ')' near 'p_mfgr' +FAILED: ParseException line 3:45 missing ) at 'sort' near 'p_mfgr' +line 3:59 missing EOF at ')' near 'p_mfgr' Index: ql/src/test/results/clientnegative/ptf_negative_WhereWithRankCond.q.out =================================================================== --- ql/src/test/results/clientnegative/ptf_negative_WhereWithRankCond.q.out (revision 1637277) +++ ql/src/test/results/clientnegative/ptf_negative_WhereWithRankCond.q.out (working copy) @@ -1,33 +1 @@ -PREHOOK: query: DROP TABLE part -PREHOOK: type: DROPTABLE -POSTHOOK: query: DROP TABLE part -POSTHOOK: type: DROPTABLE -PREHOOK: query: CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - 
p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@part -POSTHOOK: query: CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@part -FAILED: SemanticException [Error 10004]: Line 7:6 Invalid table alias or column reference 'r': (possible column names are: p_partkey, p_name, p_mfgr, p_brand, p_type, p_size, p_container, p_retailprice, p_comment) +FAILED: SemanticException [Error 10004]: Line 5:6 Invalid table alias or column reference 'r': (possible column names are: p_partkey, p_name, p_mfgr, p_brand, p_type, p_size, p_container, p_retailprice, p_comment) Index: ql/src/test/results/clientnegative/ptf_window_boundaries.q.out =================================================================== --- ql/src/test/results/clientnegative/ptf_window_boundaries.q.out (revision 1637277) +++ ql/src/test/results/clientnegative/ptf_window_boundaries.q.out (working copy) @@ -1,31 +1 @@ -PREHOOK: query: -- data setup -CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@part -POSTHOOK: query: -- data setup -CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@part -FAILED: ParseException line 4:44 mismatched input 'following' expecting KW_PRECEDING near 'unbounded' in windowframestartboundary +FAILED: ParseException line 2:44 mismatched input 'following' expecting KW_PRECEDING near 'unbounded' in windowframestartboundary Index: ql/src/test/results/clientnegative/ptf_window_boundaries2.q.out =================================================================== --- ql/src/test/results/clientnegative/ptf_window_boundaries2.q.out (revision 1637277) +++ ql/src/test/results/clientnegative/ptf_window_boundaries2.q.out (working copy) @@ -1,31 +1 @@ -PREHOOK: query: -- data setup -CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@part -POSTHOOK: query: -- data setup -CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@part -FAILED: ParseException line 4:45 mismatched input 'following' expecting KW_PRECEDING near 'unbounded' in windowframestartboundary +FAILED: ParseException line 2:45 mismatched input 'following' expecting KW_PRECEDING near 'unbounded' in windowframestartboundary Index: ql/src/test/results/clientnegative/subquery_nested_subquery.q.out =================================================================== --- ql/src/test/results/clientnegative/subquery_nested_subquery.q.out 
(revision 1637277) +++ ql/src/test/results/clientnegative/subquery_nested_subquery.q.out (working copy) @@ -1,31 +1,3 @@ -PREHOOK: query: CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@part -POSTHOOK: query: CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@part -FAILED: SemanticException Line 5:53 Unsupported SubQuery Expression 'p_name' in definition of SubQuery sq_1 [ +FAILED: SemanticException Line 3:53 Unsupported SubQuery Expression 'p_name' in definition of SubQuery sq_1 [ x.p_name in (select y.p_name from part y where exists (select z.p_name from part z where y.p_name = z.p_name)) -] used as sq_1 at Line 5:15: Nested SubQuery expressions are not supported. +] used as sq_1 at Line 3:15: Nested SubQuery expressions are not supported. Index: ql/src/test/results/clientnegative/subquery_windowing_corr.q.out =================================================================== --- ql/src/test/results/clientnegative/subquery_windowing_corr.q.out (revision 1637277) +++ ql/src/test/results/clientnegative/subquery_windowing_corr.q.out (working copy) @@ -1,48 +1,6 @@ -PREHOOK: query: DROP TABLE part -PREHOOK: type: DROPTABLE -POSTHOOK: query: DROP TABLE part -POSTHOOK: type: DROPTABLE -PREHOOK: query: -- data setup -CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@part -POSTHOOK: query: -- data setup -CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@part -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part -PREHOOK: type: LOAD -#### A masked pattern was here #### -PREHOOK: Output: default@part -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part -POSTHOOK: type: LOAD -#### A masked pattern was here #### -POSTHOOK: Output: default@part -FAILED: SemanticException Line 9:8 Unsupported SubQuery Expression '1' in definition of SubQuery sq_1 [ +FAILED: SemanticException Line 6:8 Unsupported SubQuery Expression '1' in definition of SubQuery sq_1 [ a.p_size in (select first_value(p_size) over(partition by p_mfgr order by p_size) from part b where a.p_brand = b.p_brand) -] used as sq_1 at Line 7:15: Correlated Sub Queries cannot contain Windowing clauses. +] used as sq_1 at Line 4:15: Correlated Sub Queries cannot contain Windowing clauses. 
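The subquery_windowing_corr.q.out diff above trims the expected output down to the failure itself; the rejected pattern remains visible in the quoted error text. A minimal sketch of the disallowed shape, reconstructed from that error (a correlated subquery may not contain a windowing clause):

  select *
  from part a
  where a.p_size in (select first_value(p_size) over(partition by p_mfgr order by p_size)
                     from part b
                     where a.p_brand = b.p_brand);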
Index: ql/src/test/results/clientnegative/udf_assert_true.q.out =================================================================== --- ql/src/test/results/clientnegative/udf_assert_true.q.out (revision 1637277) +++ ql/src/test/results/clientnegative/udf_assert_true.q.out (working copy) @@ -21,10 +21,10 @@ Lateral View Forward Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - Statistics: Num rows: 500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 134000 Basic stats: COMPLETE Column stats: COMPLETE Lateral View Join Operator outputColumnNames: _col5 - Statistics: Num rows: 1000 Data size: 24000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 158000 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: assert_true((_col5 > 0)) (type: void) outputColumnNames: _col0 @@ -48,7 +48,7 @@ function name: explode Lateral View Join Operator outputColumnNames: _col5 - Statistics: Num rows: 1000 Data size: 24000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 158000 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: assert_true((_col5 > 0)) (type: void) outputColumnNames: _col0 @@ -98,10 +98,10 @@ Lateral View Forward Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - Statistics: Num rows: 500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 134000 Basic stats: COMPLETE Column stats: COMPLETE Lateral View Join Operator outputColumnNames: _col5 - Statistics: Num rows: 1000 Data size: 24000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 158000 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: assert_true((_col5 < 2)) (type: void) outputColumnNames: _col0 @@ -125,7 +125,7 @@ function name: explode Lateral View Join Operator outputColumnNames: _col5 - Statistics: Num rows: 1000 Data size: 24000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 158000 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: assert_true((_col5 < 2)) (type: void) outputColumnNames: _col0 Index: ql/src/test/results/clientnegative/udf_assert_true2.q.out =================================================================== --- ql/src/test/results/clientnegative/udf_assert_true2.q.out (revision 1637277) +++ ql/src/test/results/clientnegative/udf_assert_true2.q.out (working copy) @@ -16,10 +16,10 @@ Lateral View Forward Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - Statistics: Num rows: 500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 134000 Basic stats: COMPLETE Column stats: COMPLETE Lateral View Join Operator outputColumnNames: _col5 - Statistics: Num rows: 1000 Data size: 24000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 158000 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: (1 + assert_true((_col5 < 2))) (type: double) outputColumnNames: _col0 @@ -43,7 +43,7 @@ function name: explode Lateral View Join Operator outputColumnNames: _col5 - Statistics: Num rows: 1000 Data size: 24000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 158000 Basic stats: COMPLETE Column stats: COMPLETE Select 
Operator expressions: (1 + assert_true((_col5 < 2))) (type: double) outputColumnNames: _col0 Index: ql/src/test/results/clientnegative/windowing_leadlag_in_udaf.q.out =================================================================== --- ql/src/test/results/clientnegative/windowing_leadlag_in_udaf.q.out (revision 1637277) +++ ql/src/test/results/clientnegative/windowing_leadlag_in_udaf.q.out (working copy) @@ -1,33 +1 @@ -PREHOOK: query: DROP TABLE part -PREHOOK: type: DROPTABLE -POSTHOOK: query: DROP TABLE part -POSTHOOK: type: DROPTABLE -PREHOOK: query: CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@part -POSTHOOK: query: CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@part FAILED: SemanticException [Error 10247]: Missing over clause for function : sum Index: ql/src/test/results/clientnegative/windowing_ll_no_neg.q.out =================================================================== --- ql/src/test/results/clientnegative/windowing_ll_no_neg.q.out (revision 1637277) +++ ql/src/test/results/clientnegative/windowing_ll_no_neg.q.out (working copy) @@ -1,44 +1,2 @@ -PREHOOK: query: DROP TABLE IF EXISTS part -PREHOOK: type: DROPTABLE -POSTHOOK: query: DROP TABLE IF EXISTS part -POSTHOOK: type: DROPTABLE -PREHOOK: query: -- data setup -CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@part -POSTHOOK: query: -- data setup -CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@part -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part -PREHOOK: type: LOAD -#### A masked pattern was here #### -PREHOOK: Output: default@part -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part -POSTHOOK: type: LOAD -#### A masked pattern was here #### -POSTHOOK: Output: default@part FAILED: SemanticException Failed to breakup Windowing invocations into Groups. At least 1 group must only depend on input columns. Also check for circular dependencies. Underlying error: org.apache.hadoop.hive.ql.exec.UDFArgumentTypeException: Lag amount can not be nagative. 
Specified: -1 Index: ql/src/test/results/clientnegative/windowing_ll_no_over.q.out =================================================================== --- ql/src/test/results/clientnegative/windowing_ll_no_over.q.out (revision 1637277) +++ ql/src/test/results/clientnegative/windowing_ll_no_over.q.out (working copy) @@ -1,33 +1 @@ -PREHOOK: query: DROP TABLE part -PREHOOK: type: DROPTABLE -POSTHOOK: query: DROP TABLE part -POSTHOOK: type: DROPTABLE -PREHOOK: query: CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@part -POSTHOOK: query: CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@part FAILED: SemanticException [Error 10247]: Missing over clause for function : lead Index: ql/src/test/results/clientpositive/add_part_exist.q.out =================================================================== --- ql/src/test/results/clientpositive/add_part_exist.q.out (revision 1637277) +++ ql/src/test/results/clientpositive/add_part_exist.q.out (working copy) @@ -83,8 +83,14 @@ POSTHOOK: query: SHOW TABLES POSTHOOK: type: SHOWTABLES alltypesorc +cbo_t1 +cbo_t2 +cbo_t3 +lineitem +part src src1 +src_cbo src_json src_sequencefile src_thrift Index: ql/src/test/results/clientpositive/alter1.q.out =================================================================== --- ql/src/test/results/clientpositive/alter1.q.out (revision 1637277) +++ ql/src/test/results/clientpositive/alter1.q.out (working copy) @@ -192,8 +192,14 @@ POSTHOOK: query: SHOW TABLES POSTHOOK: type: SHOWTABLES alltypesorc +cbo_t1 +cbo_t2 +cbo_t3 +lineitem +part src src1 +src_cbo src_json src_sequencefile src_thrift Index: ql/src/test/results/clientpositive/alter2.q.out =================================================================== --- ql/src/test/results/clientpositive/alter2.q.out (revision 1637277) +++ ql/src/test/results/clientpositive/alter2.q.out (working copy) @@ -211,8 +211,14 @@ POSTHOOK: query: SHOW TABLES POSTHOOK: type: SHOWTABLES alltypesorc +cbo_t1 +cbo_t2 +cbo_t3 +lineitem +part src src1 +src_cbo src_json src_sequencefile src_thrift Index: ql/src/test/results/clientpositive/alter3.q.out =================================================================== --- ql/src/test/results/clientpositive/alter3.q.out (revision 1637277) +++ ql/src/test/results/clientpositive/alter3.q.out (working copy) @@ -184,8 +184,14 @@ POSTHOOK: query: SHOW TABLES POSTHOOK: type: SHOWTABLES alltypesorc +cbo_t1 +cbo_t2 +cbo_t3 +lineitem +part src src1 +src_cbo src_json src_sequencefile src_thrift Index: ql/src/test/results/clientpositive/alter4.q.out =================================================================== --- ql/src/test/results/clientpositive/alter4.q.out (revision 1637277) +++ ql/src/test/results/clientpositive/alter4.q.out (working copy) @@ -49,8 +49,14 @@ POSTHOOK: query: SHOW TABLES POSTHOOK: type: SHOWTABLES alltypesorc +cbo_t1 +cbo_t2 +cbo_t3 +lineitem +part src src1 +src_cbo src_json src_sequencefile src_thrift Index: ql/src/test/results/clientpositive/alter5.q.out =================================================================== --- ql/src/test/results/clientpositive/alter5.q.out 
(revision 1637277)
+++ ql/src/test/results/clientpositive/alter5.q.out (working copy)
@@ -125,8 +125,14 @@
 POSTHOOK: query: SHOW TABLES
 POSTHOOK: type: SHOWTABLES
 alltypesorc
+cbo_t1
+cbo_t2
+cbo_t3
+lineitem
+part
 src
 src1
+src_cbo
 src_json
 src_sequencefile
 src_thrift
Index: ql/src/test/results/clientpositive/alter_index.q.out
===================================================================
--- ql/src/test/results/clientpositive/alter_index.q.out (revision 1637277)
+++ ql/src/test/results/clientpositive/alter_index.q.out (working copy)
@@ -48,8 +48,14 @@
 POSTHOOK: query: show tables
 POSTHOOK: type: SHOWTABLES
 alltypesorc
+cbo_t1
+cbo_t2
+cbo_t3
+lineitem
+part
 src
 src1
+src_cbo
 src_json
 src_sequencefile
 src_thrift
Index: ql/src/test/results/clientpositive/alter_rename_partition.q.out
===================================================================
--- ql/src/test/results/clientpositive/alter_rename_partition.q.out (revision 1637277)
+++ ql/src/test/results/clientpositive/alter_rename_partition.q.out (working copy)
@@ -13,8 +13,14 @@
 POSTHOOK: query: SHOW TABLES
 POSTHOOK: type: SHOWTABLES
 alltypesorc
+cbo_t1
+cbo_t2
+cbo_t3
+lineitem
+part
 src
 src1
+src_cbo
 src_json
 src_sequencefile
 src_thrift
@@ -134,8 +140,14 @@
 POSTHOOK: query: SHOW TABLES
 POSTHOOK: type: SHOWTABLES
 alltypesorc
+cbo_t1
+cbo_t2
+cbo_t3
+lineitem
+part
 src
 src1
+src_cbo
 src_json
 src_sequencefile
 src_thrift
Index: ql/src/test/results/clientpositive/annotate_stats_groupby.q.out
===================================================================
--- ql/src/test/results/clientpositive/annotate_stats_groupby.q.out (revision 1637277)
+++ ql/src/test/results/clientpositive/annotate_stats_groupby.q.out (working copy)
@@ -177,17 +177,17 @@
         keys: KEY._col0 (type: string), KEY._col1 (type: int)
         mode: mergepartial
         outputColumnNames: _col0, _col1, _col2
-        Statistics: Num rows: 8 Data size: 800 Basic stats: COMPLETE Column stats: PARTIAL
+        Statistics: Num rows: 7 Data size: 658 Basic stats: COMPLETE Column stats: PARTIAL
         Select Operator
           expressions: _col0 (type: string), _col1 (type: int), _col2 (type: bigint)
           outputColumnNames: _col0, _col1, _col2
-          Statistics: Num rows: 8 Data size: 800 Basic stats: COMPLETE Column stats: PARTIAL
+          Statistics: Num rows: 7 Data size: 658 Basic stats: COMPLETE Column stats: PARTIAL
           Group By Operator
             aggregations: min(_col1)
             keys: _col0 (type: string), _col2 (type: bigint)
             mode: hash
             outputColumnNames: _col0, _col1, _col2
-            Statistics: Num rows: 8 Data size: 832 Basic stats: COMPLETE Column stats: PARTIAL
+            Statistics: Num rows: 7 Data size: 686 Basic stats: COMPLETE Column stats: PARTIAL
             File Output Operator
               compressed: false
               table:
@@ -203,7 +203,7 @@
             key expressions: _col0 (type: string), _col1 (type: bigint)
             sort order: ++
             Map-reduce partition columns: _col0 (type: string), _col1 (type: bigint)
-            Statistics: Num rows: 8 Data size: 832 Basic stats: COMPLETE Column stats: PARTIAL
+            Statistics: Num rows: 7 Data size: 686 Basic stats: COMPLETE Column stats: PARTIAL
             value expressions: _col2 (type: int)
       Reduce Operator Tree:
         Group By Operator
@@ -211,14 +211,14 @@
           keys: KEY._col0 (type: string), KEY._col1 (type: bigint)
           mode: mergepartial
          outputColumnNames: _col0, _col1, _col2
-          Statistics: Num rows: 8 Data size: 832 Basic stats: COMPLETE Column stats: PARTIAL
+          Statistics: Num rows: 7 Data size: 686 Basic stats: COMPLETE Column stats: PARTIAL
           Select Operator
             expressions: _col0 (type: string), _col1 (type: bigint), _col2 (type: int)
             outputColumnNames: _col0, _col1, _col2
-            Statistics: Num rows: 8 Data size: 832 Basic stats: COMPLETE Column stats: PARTIAL
+            Statistics: Num rows: 7 Data size: 686 Basic stats: COMPLETE Column stats: PARTIAL
             File Output Operator
               compressed: false
-              Statistics: Num rows: 8 Data size: 832 Basic stats: COMPLETE Column stats: PARTIAL
+              Statistics: Num rows: 7 Data size: 686 Basic stats: COMPLETE Column stats: PARTIAL
               table:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
Index: ql/src/test/results/clientpositive/auto_join33.q.out
===================================================================
--- ql/src/test/results/clientpositive/auto_join33.q.out (revision 0)
+++ ql/src/test/results/clientpositive/auto_join33.q.out (working copy)
@@ -0,0 +1,113 @@
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+explain
+SELECT * FROM
+ (SELECT * FROM src WHERE key+1 < 10) a
+ JOIN
+ (SELECT * FROM src WHERE key+2 < 10) b
+ ON a.key+1=b.key+2
+PREHOOK: type: QUERY
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+explain
+SELECT * FROM
+ (SELECT * FROM src WHERE key+1 < 10) a
+ JOIN
+ (SELECT * FROM src WHERE key+2 < 10) b
+ ON a.key+1=b.key+2
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-4 is a root stage
+  Stage-3 depends on stages: Stage-4
+  Stage-0 depends on stages: Stage-3
+
+STAGE PLANS:
+  Stage: Stage-4
+    Map Reduce Local Work
+      Alias -> Map Local Tables:
+        a:src
+          Fetch Operator
+            limit: -1
+      Alias -> Map Local Operator Tree:
+        a:src
+          TableScan
+            alias: src
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: (((key + 1) < 10) and (key + 1) is not null) (type: boolean)
+              Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: key (type: string), value (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
+                HashTable Sink Operator
+                  condition expressions:
+                    0 {_col0} {_col1}
+                    1 {_col0} {_col1}
+                  keys:
+                    0 (_col0 + 1) (type: double)
+                    1 (_col0 + 2) (type: double)
+
+  Stage: Stage-3
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: src
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: (((key + 2) < 10) and (key + 2) is not null) (type: boolean)
+              Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: key (type: string), value (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
+                Map Join Operator
+                  condition map:
+                       Inner Join 0 to 1
+                  condition expressions:
+                    0 {_col0} {_col1}
+                    1 {_col0} {_col1}
+                  keys:
+                    0 (_col0 + 1) (type: double)
+                    1 (_col0 + 2) (type: double)
+                  outputColumnNames: _col0, _col1, _col2, _col3
+                  Statistics: Num rows: 91 Data size: 969 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: string)
+                    outputColumnNames: _col0, _col1, _col2, _col3
+                    Statistics: Num rows: 91 Data size: 969 Basic stats: COMPLETE Column stats: NONE
+                    File Output Operator
+                      compressed: false
+                      Statistics: Num rows: 91 Data size: 969 Basic stats: COMPLETE Column stats: NONE
+                      table:
+                          input format: org.apache.hadoop.mapred.TextInputFormat
+                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+      Local Work:
+        Map Reduce Local Work
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: SELECT * FROM
+ (SELECT * FROM src WHERE key+1 < 10) a
+ JOIN
+ (SELECT * FROM src WHERE key+2 < 10) b
+ ON a.key+1=b.key+2
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM
+ (SELECT * FROM src WHERE key+1 < 10) a
+ JOIN
+ (SELECT * FROM src WHERE key+2 < 10) b
+ ON a.key+1=b.key+2
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+5 val_5 4 val_4
+5 val_5 4 val_4
+5 val_5 4 val_4
Index: ql/src/test/results/clientpositive/cbo_correctness.q.out
===================================================================
--- ql/src/test/results/clientpositive/cbo_correctness.q.out (revision 1637277)
+++ ql/src/test/results/clientpositive/cbo_correctness.q.out (working copy)
@@ -1,19107 +0,0 @@
-PREHOOK: query: drop table if exists t1
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table if exists t1
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: drop table if exists t2
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table if exists t2
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: drop table if exists t3
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table if exists t3
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: create table t1(key string, value string, c_int int, c_float float, c_boolean boolean) partitioned by (dt string) row format delimited fields terminated by ',' STORED AS TEXTFILE
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@t1
-POSTHOOK: query: create table t1(key string, value string, c_int int, c_float float, c_boolean boolean) partitioned by (dt string) row format delimited fields terminated by ',' STORED AS TEXTFILE
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@t1
-PREHOOK: query: create table t2(key string, value string, c_int int, c_float float, c_boolean boolean) partitioned by (dt string) row format delimited fields terminated by ',' STORED AS TEXTFILE
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@t2
-POSTHOOK: query: create table t2(key string, value string, c_int int, c_float float, c_boolean boolean) partitioned by (dt string) row format delimited fields terminated by ',' STORED AS TEXTFILE
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@t2
-PREHOOK: query: create table t3(key string, value string, c_int int, c_float float, c_boolean boolean) row format delimited fields terminated by ',' STORED AS TEXTFILE
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@t3
-POSTHOOK: query: create table t3(key string, value string, c_int int, c_float float, c_boolean boolean) row format delimited fields terminated by ',' STORED AS TEXTFILE
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@t3
-PREHOOK: query: load data local inpath '../../data/files/cbo_t1.txt' into table t1 partition (dt='2014')
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@t1
-POSTHOOK: query: load data local inpath '../../data/files/cbo_t1.txt' into table t1 partition (dt='2014')
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@t1
-POSTHOOK: Output: default@t1@dt=2014
-PREHOOK: query: load data local inpath '../../data/files/cbo_t2.txt' into table t2 partition (dt='2014')
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@t2
-POSTHOOK: query: load data local inpath '../../data/files/cbo_t2.txt' into table t2 partition (dt='2014')
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@t2
-POSTHOOK: Output: default@t2@dt=2014
-PREHOOK: query: load data local inpath '../../data/files/cbo_t3.txt' into table t3
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@t3
-POSTHOOK: query: load data local inpath '../../data/files/cbo_t3.txt' into table t3
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@t3
-PREHOOK: query: CREATE TABLE part(
- p_partkey INT,
- p_name STRING,
- p_mfgr STRING,
- p_brand STRING,
- p_type STRING,
- p_size INT,
- p_container STRING,
- p_retailprice DOUBLE,
- p_comment STRING
-)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@part
-POSTHOOK: query: CREATE TABLE part(
- p_partkey INT,
- p_name STRING,
- p_mfgr STRING,
- p_brand STRING,
- p_type STRING,
- p_size INT,
- p_container STRING,
- p_retailprice DOUBLE,
- p_comment STRING
-)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@part
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@part
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@part
-PREHOOK: query: DROP TABLE lineitem
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE lineitem
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE lineitem (L_ORDERKEY INT,
- L_PARTKEY INT,
- L_SUPPKEY INT,
- L_LINENUMBER INT,
- L_QUANTITY DOUBLE,
- L_EXTENDEDPRICE DOUBLE,
- L_DISCOUNT DOUBLE,
- L_TAX DOUBLE,
- L_RETURNFLAG STRING,
- L_LINESTATUS STRING,
- l_shipdate STRING,
- L_COMMITDATE STRING,
- L_RECEIPTDATE STRING,
- L_SHIPINSTRUCT STRING,
- L_SHIPMODE STRING,
- L_COMMENT STRING)
-ROW FORMAT DELIMITED
-FIELDS TERMINATED BY '|'
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@lineitem
-POSTHOOK: query: CREATE TABLE lineitem (L_ORDERKEY INT,
- L_PARTKEY INT,
- L_SUPPKEY INT,
- L_LINENUMBER INT,
- L_QUANTITY DOUBLE,
- L_EXTENDEDPRICE DOUBLE,
- L_DISCOUNT DOUBLE,
- L_TAX DOUBLE,
- L_RETURNFLAG STRING,
- L_LINESTATUS STRING,
- l_shipdate STRING,
- L_COMMITDATE STRING,
- L_RECEIPTDATE STRING,
- L_SHIPINSTRUCT STRING,
- L_SHIPMODE STRING,
- L_COMMENT STRING)
-ROW FORMAT DELIMITED
-FIELDS TERMINATED BY '|'
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@lineitem
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/lineitem.txt' OVERWRITE INTO TABLE lineitem
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@lineitem
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/lineitem.txt' OVERWRITE INTO TABLE lineitem
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@lineitem
-PREHOOK: query: create table src_cbo as select * from src
-PREHOOK: type: CREATETABLE_AS_SELECT
-PREHOOK: Input: default@src
-PREHOOK: Output: database:default
-PREHOOK: Output: default@src_cbo
-POSTHOOK: query: create table src_cbo as select * from src
-POSTHOOK: type: CREATETABLE_AS_SELECT
-POSTHOOK: Input: default@src
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@src_cbo
-PREHOOK: query: analyze table t1 partition (dt) compute statistics
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t1@dt=2014
-PREHOOK: Output: default@t1
-PREHOOK: Output: default@t1@dt=2014
-POSTHOOK: query: analyze table t1 partition (dt) compute statistics
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t1@dt=2014
-POSTHOOK: Output: default@t1
-POSTHOOK: Output: default@t1@dt=2014
-PREHOOK: query: analyze table t1 compute statistics for columns key, value, c_int, c_float, c_boolean
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t1@dt=2014
-#### A masked pattern was here ####
-POSTHOOK: query: analyze table t1 compute statistics for columns key, value, c_int, c_float, c_boolean
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t1@dt=2014
-#### A masked pattern was here ####
-PREHOOK: query: analyze table t2 partition (dt) compute statistics
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t2
-PREHOOK: Input: default@t2@dt=2014
-PREHOOK: Output: default@t2
-PREHOOK: Output: default@t2@dt=2014
-POSTHOOK: query: analyze table t2 partition (dt) compute statistics
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t2
-POSTHOOK: Input: default@t2@dt=2014
-POSTHOOK: Output: default@t2
-POSTHOOK: Output: default@t2@dt=2014
-PREHOOK: query: analyze table t2 compute statistics for columns key, value, c_int, c_float, c_boolean
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t2
-PREHOOK: Input: default@t2@dt=2014
-#### A masked pattern was here ####
-POSTHOOK: query: analyze table t2 compute statistics for columns key, value, c_int, c_float, c_boolean
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t2
-POSTHOOK: Input: default@t2@dt=2014
-#### A masked pattern was here ####
-PREHOOK: query: analyze table t3 compute statistics
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t3
-PREHOOK: Output: default@t3
-POSTHOOK: query: analyze table t3 compute statistics
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t3
-POSTHOOK: Output: default@t3
-PREHOOK: query: analyze table t3 compute statistics for columns key, value, c_int, c_float, c_boolean
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t3
-#### A masked pattern was here ####
-POSTHOOK: query: analyze table t3 compute statistics for columns key, value, c_int, c_float, c_boolean
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t3
-#### A masked pattern was here ####
-PREHOOK: query: analyze table src_cbo compute statistics
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src_cbo
-PREHOOK: Output: default@src_cbo
-POSTHOOK: query: analyze table src_cbo compute statistics
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src_cbo
-POSTHOOK: Output: default@src_cbo
-PREHOOK: query: analyze table src_cbo compute statistics for columns
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src_cbo
-#### A masked pattern was here ####
-POSTHOOK: query: analyze table src_cbo compute statistics for columns
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src_cbo
-#### A masked pattern was here ####
-PREHOOK: query: analyze table part compute statistics
-PREHOOK: type: QUERY
-PREHOOK: Input: default@part
-PREHOOK: Output: default@part
-POSTHOOK: query: analyze table part compute statistics
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@part
-POSTHOOK: Output: default@part
-PREHOOK: query: analyze table part compute statistics for columns
-PREHOOK: type: QUERY
-PREHOOK: Input: default@part
-#### A masked pattern
was here #### -POSTHOOK: query: analyze table part compute statistics for columns -POSTHOOK: type: QUERY -POSTHOOK: Input: default@part -#### A masked pattern was here #### -PREHOOK: query: analyze table lineitem compute statistics -PREHOOK: type: QUERY -PREHOOK: Input: default@lineitem -PREHOOK: Output: default@lineitem -POSTHOOK: query: analyze table lineitem compute statistics -POSTHOOK: type: QUERY -POSTHOOK: Input: default@lineitem -POSTHOOK: Output: default@lineitem -PREHOOK: query: analyze table lineitem compute statistics for columns -PREHOOK: type: QUERY -PREHOOK: Input: default@lineitem -#### A masked pattern was here #### -POSTHOOK: query: analyze table lineitem compute statistics for columns -POSTHOOK: type: QUERY -POSTHOOK: Input: default@lineitem -#### A masked pattern was here #### -PREHOOK: query: -- 1. Test Select + TS -select * from t1 -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: -- 1. Test Select + TS -select * from t1 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 false 2014 -1 1 1 1.0 false 2014 -null null NULL NULL NULL 2014 -null null NULL NULL NULL 2014 -PREHOOK: query: select * from t1 as t1 -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: select * from t1 as t1 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 false 2014 -1 1 1 1.0 false 2014 -null null NULL NULL NULL 2014 -null null NULL NULL NULL 2014 -PREHOOK: query: select * from t1 as t2 -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: select * from t1 as t2 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 false 2014 -1 1 1 1.0 false 2014 -null null NULL NULL NULL 2014 -null null NULL NULL NULL 2014 -PREHOOK: query: select t1.key as x, c_int as c_int, (((c_int+c_float)*10)+5) as y from t1 -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: select t1.key as x, c_int as c_int, (((c_int+c_float)*10)+5) as y from t1 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: 
default@t1@dt=2014 -#### A masked pattern was here #### -1 1 25.0 -1 1 25.0 -1 1 25.0 -1 1 25.0 -1 1 25.0 -1 1 25.0 -1 1 25.0 -1 1 25.0 -1 1 25.0 -1 1 25.0 - 1 1 25.0 - 1 1 25.0 - 1 1 25.0 - 1 1 25.0 -1 1 25.0 -1 1 25.0 -1 1 25.0 -1 1 25.0 -null NULL NULL -null NULL NULL -PREHOOK: query: -- 2. Test Select + TS + FIL -select * from t1 where t1.c_int >= 0 -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: -- 2. Test Select + TS + FIL -select * from t1 where t1.c_int >= 0 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 false 2014 -1 1 1 1.0 false 2014 -PREHOOK: query: select * from t1 as t1 where t1.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100 -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: select * from t1 as t1 where t1.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 false 2014 -1 1 1 1.0 false 2014 -PREHOOK: query: select * from t1 as t2 where t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100 -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: select * from t1 as t2 where t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 false 2014 -1 1 1 1.0 false 2014 -PREHOOK: query: select t2.key as x, c_int as c_int, (((c_int+c_float)*10)+5) as y from t1 as t2 where t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100 -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: select t2.key as x, c_int as c_int, (((c_int+c_float)*10)+5) as y from t1 as t2 where t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -1 1 25.0 -1 1 25.0 -1 1 25.0 -1 1 25.0 -1 1 25.0 -1 1 25.0 -1 1 25.0 -1 1 25.0 -1 1 25.0 -1 1 25.0 - 1 1 25.0 - 1 1 25.0 - 1 1 25.0 - 1 1 25.0 -1 1 25.0 -1 1 25.0 -1 1 25.0 -1 1 25.0 -PREHOOK: query: -- 3 Test Select + Select + TS + FIL 
-select * from (select * from t1 where t1.c_int >= 0) as t1 -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: -- 3 Test Select + Select + TS + FIL -select * from (select * from t1 where t1.c_int >= 0) as t1 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 false 2014 -1 1 1 1.0 false 2014 -PREHOOK: query: select * from (select * from t1 as t1 where t1.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as t1 -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: select * from (select * from t1 as t1 where t1.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as t1 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 false 2014 -1 1 1 1.0 false 2014 -PREHOOK: query: select * from (select * from t1 as t2 where t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as t1 -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: select * from (select * from t1 as t2 where t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as t1 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 false 2014 -1 1 1 1.0 false 2014 -PREHOOK: query: select * from (select t2.key as x, c_int as c_int, (((c_int+c_float)*10)+5) as y from t1 as t2 where t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as t1 -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: select * from (select t2.key as x, c_int as c_int, (((c_int+c_float)*10)+5) as y from t1 as t2 where t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as t1 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -1 1 25.0 -1 1 25.0 -1 1 25.0 -1 1 25.0 -1 1 25.0 -1 1 25.0 -1 1 25.0 -1 1 25.0 -1 1 25.0 -1 1 25.0 - 1 1 25.0 - 1 1 25.0 - 1 1 25.0 - 1 1 25.0 -1 1 25.0 -1 1 25.0 -1 1 25.0 -1 1 25.0 -PREHOOK: query: select * from (select * from t1 where t1.c_int >= 0) as t1 where t1.c_int >= 0 -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -#### A 
masked pattern was here #### -POSTHOOK: query: select * from (select * from t1 where t1.c_int >= 0) as t1 where t1.c_int >= 0 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 false 2014 -1 1 1 1.0 false 2014 -PREHOOK: query: select * from (select * from t1 as t1 where t1.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as t1 where t1.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100 -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: select * from (select * from t1 as t1 where t1.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as t1 where t1.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 false 2014 -1 1 1 1.0 false 2014 -PREHOOK: query: select * from (select * from t1 as t2 where t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as t2 where t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100 -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: select * from (select * from t1 as t2 where t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as t2 where t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 false 2014 -1 1 1 1.0 false 2014 -PREHOOK: query: select * from (select t2.key as x, c_int as c_int, (((c_int+c_float)*10)+5) as y from t1 as t2 where t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as t1 where t1.c_int >= 0 and y+c_int >= 0 or x <= 100 -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: select * from (select t2.key as x, c_int as c_int, (((c_int+c_float)*10)+5) as y from t1 as t2 where t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as t1 where t1.c_int >= 0 and y+c_int >= 0 or x <= 100 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -1 1 25.0 -1 1 25.0 -1 1 25.0 -1 1 25.0 -1 1 25.0 -1 1 25.0 -1 1 25.0 -1 1 25.0 -1 1 25.0 -1 1 25.0 - 1 1 25.0 - 1 1 25.0 - 1 1 25.0 - 1 1 25.0 -1 1 25.0 -1 1 25.0 -1 1 25.0 -1 1 25.0 -PREHOOK: query: 
select t1.c_int+c_float as x , c_int as c_int, (((c_int+c_float)*10)+5) as y from (select * from t1 where t1.c_int >= 0) as t1 where t1.c_int >= 0 -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: select t1.c_int+c_float as x , c_int as c_int, (((c_int+c_float)*10)+5) as y from (select * from t1 where t1.c_int >= 0) as t1 where t1.c_int >= 0 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -2.0 1 25.0 -2.0 1 25.0 -2.0 1 25.0 -2.0 1 25.0 -2.0 1 25.0 -2.0 1 25.0 -2.0 1 25.0 -2.0 1 25.0 -2.0 1 25.0 -2.0 1 25.0 -2.0 1 25.0 -2.0 1 25.0 -2.0 1 25.0 -2.0 1 25.0 -2.0 1 25.0 -2.0 1 25.0 -2.0 1 25.0 -2.0 1 25.0 -PREHOOK: query: select t2.c_int+c_float as x , c_int as c_int, (((c_int+c_float)*10)+5) as y from (select * from t1 where t1.c_int >= 0) as t2 where t2.c_int >= 0 -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: select t2.c_int+c_float as x , c_int as c_int, (((c_int+c_float)*10)+5) as y from (select * from t1 where t1.c_int >= 0) as t2 where t2.c_int >= 0 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -2.0 1 25.0 -2.0 1 25.0 -2.0 1 25.0 -2.0 1 25.0 -2.0 1 25.0 -2.0 1 25.0 -2.0 1 25.0 -2.0 1 25.0 -2.0 1 25.0 -2.0 1 25.0 -2.0 1 25.0 -2.0 1 25.0 -2.0 1 25.0 -2.0 1 25.0 -2.0 1 25.0 -2.0 1 25.0 -2.0 1 25.0 -2.0 1 25.0 -PREHOOK: query: -- 4. Test Select + Join + TS -select t1.c_int, t2.c_int from t1 join t2 on t1.key=t2.key -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -PREHOOK: Input: default@t2 -PREHOOK: Input: default@t2@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: -- 4. 
Test Select + Join + TS -select t1.c_int, t2.c_int from t1 join t2 on t1.key=t2.key -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -POSTHOOK: Input: default@t2 -POSTHOOK: Input: default@t2@dt=2014 -#### A masked pattern was here #### -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -NULL NULL -NULL NULL -NULL NULL -NULL NULL -PREHOOK: query: select t1.key from t1 join t3 -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -PREHOOK: Input: default@t3 -#### A masked pattern was here #### -POSTHOOK: query: select t1.key from t1 join t3 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -POSTHOOK: Input: default@t3 -#### A masked pattern was here #### -null -null -null -null -null -null -null -null -null -null -null -null -null -null -null -null -null -null -null -null -null -null -null -null -null -null -null -null -null -null -null -null -null -null -null -null -null -null -null -null -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -PREHOOK: query: select t1.key from t1 join t3 where t1.key=t3.key and t1.key >= 1 -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -PREHOOK: Input: default@t3 -#### A masked pattern was here #### -POSTHOOK: query: select t1.key from t1 join t3 where t1.key=t3.key and t1.key >= 1 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -POSTHOOK: Input: default@t3 -#### A masked pattern was here #### - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -PREHOOK: query: select t1.c_int, t2.c_int from t1 left outer join t2 on t1.key=t2.key -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -PREHOOK: Input: default@t2 -PREHOOK: Input: 
default@t2@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: select t1.c_int, t2.c_int from t1 left outer join t2 on t1.key=t2.key -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -POSTHOOK: Input: default@t2 -POSTHOOK: Input: default@t2@dt=2014 -#### A masked pattern was here #### -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -NULL NULL -NULL NULL -NULL NULL -NULL NULL -PREHOOK: query: select t1.c_int, t2.c_int from t1 right outer join t2 on t1.key=t2.key -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -PREHOOK: Input: default@t2 -PREHOOK: Input: default@t2@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: select t1.c_int, t2.c_int from t1 right outer join t2 on t1.key=t2.key -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -POSTHOOK: Input: default@t2 -POSTHOOK: Input: default@t2@dt=2014 -#### A masked pattern was here #### -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -NULL 2 -NULL 2 -NULL 2 -NULL 2 -NULL 2 -NULL NULL -NULL NULL -NULL NULL -NULL NULL -PREHOOK: query: select t1.c_int, t2.c_int from t1 full outer join t2 on t1.key=t2.key -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -PREHOOK: Input: default@t2 -PREHOOK: Input: default@t2@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: select t1.c_int, t2.c_int from t1 full outer join t2 on t1.key=t2.key -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -POSTHOOK: Input: default@t2 -POSTHOOK: Input: default@t2@dt=2014 -#### A masked pattern was here #### -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -NULL 2 -NULL 2 -NULL 2 -NULL 2 -NULL 2 -NULL NULL -NULL NULL -NULL NULL -NULL NULL -PREHOOK: query: select b, t1.c, t2.p, q, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1) t1 join (select t2.key as p, t2.c_int as q, c_float as r from t2) t2 on t1.a=p join t3 on t1.a=key -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -PREHOOK: Input: default@t2 -PREHOOK: Input: default@t2@dt=2014 -PREHOOK: Input: default@t3 -#### A masked pattern was here #### -POSTHOOK: query: select b, 
t1.c, t2.p, q, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1) t1 join (select t2.key as p, t2.c_int as q, c_float as r from t2) t2 on t1.a=p join t3 on t1.a=key -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -POSTHOOK: Input: default@t2 -POSTHOOK: Input: default@t2@dt=2014 -POSTHOOK: Input: default@t3 -#### A masked pattern was here #### -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 
1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 
1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -NULL NULL null NULL NULL -NULL NULL null NULL NULL -NULL NULL null NULL NULL -NULL NULL null NULL NULL -NULL NULL null NULL NULL -NULL NULL null NULL NULL -NULL NULL null NULL NULL -NULL NULL null NULL NULL -PREHOOK: query: select key, t1.c_int, t2.p, q from t1 join (select t2.key as p, t2.c_int as q, c_float as r from t2) t2 on t1.key=p join (select key as a, c_int as b, t3.c_float as c from t3)t3 on t1.key=a -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -PREHOOK: Input: default@t2 -PREHOOK: Input: default@t2@dt=2014 -PREHOOK: Input: default@t3 -#### A masked pattern was here #### -POSTHOOK: query: select key, t1.c_int, t2.p, q from t1 join (select t2.key as p, t2.c_int as q, c_float as r from t2) t2 on t1.key=p join (select key as a, c_int as b, t3.c_float as c from t3)t3 on t1.key=a -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -POSTHOOK: Input: default@t2 -POSTHOOK: Input: default@t2@dt=2014 -POSTHOOK: Input: default@t3 -#### A masked pattern was here #### - 1 1 1 1 - 1 1 1 1 - 1 1 1 1 - 1 1 1 1 - 1 1 1 1 - 1 1 1 1 - 1 1 1 1 - 1 1 1 1 - 1 1 1 1 - 1 1 1 1 - 1 1 1 1 - 1 1 1 1 - 1 1 1 1 - 1 1 1 1 - 1 1 1 1 - 1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 
1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -null NULL null NULL -null NULL null NULL -null NULL null NULL -null NULL null NULL -null NULL null NULL -null NULL null NULL -null NULL null NULL -null NULL null NULL -PREHOOK: query: select a, t1.b, key, t2.c_int, t3.p from (select key as a, c_int as b, t1.c_float as c from t1) t1 join t2 on t1.a=key join (select key as p, c_int as q, t3.c_float as r from t3)t3 on t1.a=t3.p -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -PREHOOK: Input: default@t2 -PREHOOK: Input: default@t2@dt=2014 -PREHOOK: Input: default@t3 -#### A masked pattern was here #### -POSTHOOK: query: select a, t1.b, key, t2.c_int, t3.p from (select key as a, c_int as b, t1.c_float as c from t1) t1 join t2 on t1.a=key join (select key as p, c_int as q, t3.c_float as r from t3)t3 on t1.a=t3.p -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -POSTHOOK: Input: default@t2 -POSTHOOK: Input: default@t2@dt=2014 -POSTHOOK: Input: default@t3 -#### A masked pattern was here #### - 1 1 1 1 1 - 1 1 1 1 1 - 1 1 1 1 1 - 1 1 1 1 1 - 1 1 1 1 1 - 1 1 1 1 1 - 1 1 1 1 1 - 1 1 1 1 1 
- 1 1 1 1 1 - 1 1 1 1 1 - 1 1 1 1 1 - 1 1 1 1 1 - 1 1 1 1 1 - 1 1 1 1 1 - 1 1 1 1 1 - 1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 
1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -1 1 1 1 1 -null NULL null NULL null -null NULL null NULL null -null NULL null NULL null -null NULL null NULL null -null NULL null NULL null -null NULL null NULL null -null NULL null NULL null -null NULL null NULL null -PREHOOK: query: select b, t1.c, t2.c_int, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1) t1 join t2 on t1.a=t2.key join t3 on t1.a=t3.key -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -PREHOOK: Input: default@t2 -PREHOOK: Input: default@t2@dt=2014 -PREHOOK: Input: default@t3 -#### A masked pattern was here #### -POSTHOOK: query: select b, t1.c, t2.c_int, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1) t1 join t2 on t1.a=t2.key join t3 on t1.a=t3.key -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -POSTHOOK: Input: default@t2 -POSTHOOK: Input: default@t2@dt=2014 -POSTHOOK: Input: default@t3 -#### A masked pattern was here #### -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 1 1 -1 1.0 
[... run of deleted result rows continues (repeating pattern "1 1 -1 1.0"); the original one-row-per-line diff breaks were lost in extraction ...]
-NULL NULL NULL NULL
-NULL NULL NULL NULL
-NULL NULL NULL NULL
-NULL NULL NULL NULL
-NULL NULL NULL NULL
-NULL NULL NULL NULL
-NULL NULL NULL NULL
-NULL NULL NULL NULL
-PREHOOK: query: select t3.c_int, b, t2.c_int, t1.c from (select key as a, c_int as b, t1.c_float as c from t1) t1 join t2 on t1.a=t2.key join t3 on t1.a=t3.key
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t1@dt=2014
-PREHOOK: Input: default@t2
-PREHOOK: Input: default@t2@dt=2014
-PREHOOK: Input: default@t3
-#### A masked pattern was here ####
-POSTHOOK: query: select t3.c_int, b, t2.c_int, t1.c from (select key as a, c_int as b, t1.c_float as c from t1) t1 join t2 on t1.a=t2.key join t3 on t1.a=t3.key
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t1@dt=2014
-POSTHOOK: Input: default@t2
-POSTHOOK: Input: default@t2@dt=2014
-POSTHOOK: Input: default@t3
-#### A masked pattern was here ####
[... run of identical deleted rows: "-1 1 1 1.0" ...]
-NULL NULL NULL NULL
-NULL NULL NULL NULL
-NULL NULL NULL NULL
-NULL NULL NULL NULL
-NULL NULL NULL NULL
-NULL NULL NULL NULL
-NULL NULL NULL NULL
-NULL NULL NULL NULL
-PREHOOK: query: select b, t1.c, t2.p, q, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1) t1 left outer join (select t2.key as p, t2.c_int as q, c_float as r from t2) t2 on t1.a=p join t3 on t1.a=key
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t1@dt=2014
-PREHOOK: Input: default@t2
-PREHOOK: Input: default@t2@dt=2014
-PREHOOK: Input: default@t3
-#### A masked pattern was here ####
-POSTHOOK: query: select b, t1.c, t2.p, q, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1) t1 left outer join (select t2.key as p, t2.c_int as q, c_float as r from t2) t2 on t1.a=p join t3 on t1.a=key
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t1@dt=2014
-POSTHOOK: Input: default@t2
-POSTHOOK: Input: default@t2@dt=2014
-POSTHOOK: Input: default@t3
-#### A masked pattern was here ####
[... run of identical deleted rows: "-1 1.0 1 1 1" ...]
-NULL NULL null NULL NULL
-NULL NULL null NULL NULL
-NULL NULL null NULL NULL
-NULL NULL null NULL NULL
-NULL NULL null NULL NULL
-NULL NULL null NULL NULL
-NULL NULL null NULL NULL
-NULL NULL null NULL NULL
-PREHOOK: query: select key, t1.c_int, t2.p, q from t1 join (select t2.key as p, t2.c_int as q, c_float as r from t2) t2 on t1.key=p left outer join (select key as a, c_int as b, t3.c_float as c from t3)t3 on t1.key=a
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t1@dt=2014
-PREHOOK: Input: default@t2
-PREHOOK: Input: default@t2@dt=2014
-PREHOOK: Input: default@t3
-#### A masked pattern was here ####
-POSTHOOK: query: select key, t1.c_int, t2.p, q from t1 join (select t2.key as p, t2.c_int as q, c_float as r from t2) t2 on t1.key=p left outer join (select key as a, c_int as b, t3.c_float as c from t3)t3 on t1.key=a
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t1@dt=2014
-POSTHOOK: Input: default@t2
-POSTHOOK: Input: default@t2@dt=2014
-POSTHOOK: Input: default@t3
-#### A masked pattern was here ####
- 1 1 1 1
- 1 1 1 1
- 1 1 1 1
- 1 1 1 1
- 1 1 1 1
- 1 1 1 1
- 1 1 1 1
- 1 1 1 1
- 1 1 1 1
- 1 1 1 1
- 1 1 1 1
- 1 1 1 1
- 1 1 1 1
- 1 1 1 1
- 1 1 1 1
- 1 1 1 1
[... run of identical deleted rows: "-1 1 1 1" ...]
-null NULL null NULL
-null NULL null NULL
-null NULL null NULL
-null NULL null NULL
-null NULL null NULL
-null NULL null NULL
-null NULL null NULL
-null NULL null NULL
-PREHOOK: query: select b, t1.c, t2.p, q, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1) t1 right outer join (select t2.key as p, t2.c_int as q, c_float as r from t2) t2 on t1.a=p join t3 on t1.a=key
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t1@dt=2014
-PREHOOK: Input: default@t2
-PREHOOK: Input: default@t2@dt=2014
-PREHOOK: Input: default@t3
-#### A masked pattern was here ####
-POSTHOOK: query: select b, t1.c, t2.p, q, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1) t1 right outer join (select t2.key as p, t2.c_int as q, c_float as r from t2) t2 on t1.a=p join t3 on t1.a=key
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t1@dt=2014
-POSTHOOK: Input: default@t2
-POSTHOOK: Input: default@t2@dt=2014
-POSTHOOK: Input: default@t3
-#### A masked pattern was here ####
[... run of identical deleted rows: "-1 1.0 1 1 1" ...]
-NULL NULL null NULL NULL
-NULL NULL null NULL NULL
-NULL NULL null NULL NULL
-NULL NULL null NULL NULL
-NULL NULL null NULL NULL
-NULL NULL null NULL NULL
-NULL NULL null NULL NULL
-NULL NULL null NULL NULL
-PREHOOK: query: select key, t1.c_int, t2.p, q from t1 join (select t2.key as p, t2.c_int as q, c_float as r from t2) t2 on t1.key=p right outer join (select key as a, c_int as b, t3.c_float as c from t3)t3 on t1.key=a
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t1@dt=2014
-PREHOOK: Input: default@t2
-PREHOOK: Input: default@t2@dt=2014
-PREHOOK: Input: default@t3
-#### A masked pattern was here ####
-POSTHOOK: query: select key, t1.c_int, t2.p, q from t1 join (select t2.key as p, t2.c_int as q, c_float as r from t2) t2 on t1.key=p right outer join (select key as a, c_int as b, t3.c_float as c from t3)t3 on t1.key=a
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t1@dt=2014
-POSTHOOK: Input: default@t2
-POSTHOOK: Input: default@t2@dt=2014
-POSTHOOK: Input: default@t3
-#### A masked pattern was here ####
- 1 1 1 1
- 1 1 1 1
- 1 1 1 1
- 1 1 1 1
- 1 1 1 1
- 1 1 1 1
- 1 1 1 1
- 1 1 1 1
- 1 1 1 1
- 1 1 1 1
- 1 1 1 1
- 1 1 1 1
- 1 1 1 1
- 1 1 1 1
- 1 1 1 1
- 1 1 1 1
[... run of identical deleted rows: "-1 1 1 1" ...]
-NULL NULL NULL NULL
-NULL NULL NULL NULL
-NULL NULL NULL NULL
-NULL NULL NULL NULL
-NULL NULL NULL NULL
-NULL NULL NULL NULL
-null NULL null NULL
-null NULL null NULL
-null NULL null NULL
-null NULL null NULL
-null NULL null NULL
-null NULL null NULL
-null NULL null NULL
-null NULL null NULL
-PREHOOK: query: select b, t1.c, t2.p, q, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1) t1 full outer join (select t2.key as p, t2.c_int as q, c_float as r from t2) t2 on t1.a=p join t3 on t1.a=key
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t1@dt=2014
-PREHOOK: Input: default@t2
-PREHOOK: Input: default@t2@dt=2014
-PREHOOK: Input: default@t3
-#### A masked pattern was here ####
-POSTHOOK: query: select b, t1.c, t2.p, q, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1) t1 full outer join (select t2.key as p, t2.c_int as q, c_float as r from t2) t2 on t1.a=p join t3 on t1.a=key
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t1@dt=2014
-POSTHOOK: Input: default@t2
-POSTHOOK: Input: default@t2@dt=2014
-POSTHOOK: Input: default@t3
-#### A masked pattern was here ####
[... run of identical deleted rows: "-1 1.0 1 1 1" ...]
-NULL NULL null NULL NULL
-NULL NULL null NULL NULL
-NULL NULL null NULL NULL
-NULL NULL null NULL NULL
-NULL NULL null NULL NULL
-NULL NULL null NULL NULL
-NULL NULL null NULL NULL
-NULL NULL null NULL NULL
-PREHOOK: query: select key, t1.c_int, t2.p, q from t1 join (select t2.key as p, t2.c_int as q, c_float as r from t2) t2 on t1.key=p full outer join (select key as a, c_int as b, t3.c_float as c from t3)t3 on t1.key=a
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t1@dt=2014
-PREHOOK: Input: default@t2
-PREHOOK: Input: default@t2@dt=2014
-PREHOOK: Input: default@t3
-#### A masked pattern was here ####
-POSTHOOK: query: select key, t1.c_int, t2.p, q from t1 join (select t2.key as p, t2.c_int as q, c_float as r from t2) t2 on t1.key=p full outer join (select key as a, c_int as b, t3.c_float as c from t3)t3 on t1.key=a
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t1@dt=2014
-POSTHOOK: Input: default@t2
-POSTHOOK: Input: default@t2@dt=2014
-POSTHOOK: Input: default@t3
-#### A masked pattern was here ####
- 1 1 1 1
- 1 1 1 1
- 1 1 1 1
- 1 1 1 1
- 1 1 1 1
- 1 1 1 1
- 1 1 1 1
- 1 1 1 1
- 1 1 1 1
- 1 1 1 1
- 1 1 1 1
- 1 1 1 1
- 1 1 1 1
- 1 1 1 1
- 1 1 1 1
- 1 1 1 1
[... run of identical deleted rows: "-1 1 1 1" ...]
-NULL NULL NULL NULL
-NULL NULL NULL NULL
-NULL NULL NULL NULL
-NULL NULL NULL NULL
-NULL NULL NULL NULL
-NULL NULL NULL NULL
-null NULL null NULL
-null NULL null NULL
-null NULL null NULL
-null NULL null NULL
-null NULL null NULL
-null NULL null NULL
-null NULL null NULL
-null NULL null NULL
-PREHOOK: query: -- 5. Test Select + Join + FIL + TS
-select t1.c_int, t2.c_int from t1 join t2 on t1.key=t2.key where (t1.c_int + t2.c_int == 2) and (t1.c_int > 0 or t2.c_float >= 0)
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t1@dt=2014
-PREHOOK: Input: default@t2
-PREHOOK: Input: default@t2@dt=2014
-#### A masked pattern was here ####
-POSTHOOK: query: -- 5. Test Select + Join + FIL + TS
-select t1.c_int, t2.c_int from t1 join t2 on t1.key=t2.key where (t1.c_int + t2.c_int == 2) and (t1.c_int > 0 or t2.c_float >= 0)
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t1@dt=2014
-POSTHOOK: Input: default@t2
-POSTHOOK: Input: default@t2@dt=2014
-#### A masked pattern was here ####
[... run of identical deleted rows: "-1 1" ...]
-PREHOOK: query: select t1.c_int, t2.c_int from t1 left outer join t2 on t1.key=t2.key where (t1.c_int + t2.c_int == 2) and (t1.c_int > 0 or t2.c_float >= 0)
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t1@dt=2014
-PREHOOK: Input: default@t2
-PREHOOK: Input: default@t2@dt=2014
-#### A masked pattern was here ####
-POSTHOOK: query: select t1.c_int, t2.c_int from t1 left outer join t2 on t1.key=t2.key where (t1.c_int + t2.c_int == 2) and (t1.c_int > 0 or t2.c_float >= 0)
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t1@dt=2014
-POSTHOOK: Input: default@t2
-POSTHOOK: Input: default@t2@dt=2014
-#### A masked pattern was here ####
[... run of identical deleted rows: "-1 1" ...]
-PREHOOK: query: select t1.c_int, t2.c_int from t1 right outer join t2 on t1.key=t2.key where (t1.c_int + t2.c_int == 2) and (t1.c_int > 0 or t2.c_float >= 0)
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t1@dt=2014
-PREHOOK: Input: default@t2
-PREHOOK: Input: default@t2@dt=2014
-#### A masked pattern was here ####
-POSTHOOK: query: select t1.c_int, t2.c_int from t1 right outer join t2 on t1.key=t2.key where (t1.c_int + t2.c_int == 2) and (t1.c_int > 0 or t2.c_float >= 0)
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t1@dt=2014
-POSTHOOK: Input: default@t2
-POSTHOOK: Input: default@t2@dt=2014
-#### A masked pattern was here ####
[... run of identical deleted rows: "-1 1" ...]
-PREHOOK: query: select t1.c_int, t2.c_int from t1 full outer join t2 on t1.key=t2.key where (t1.c_int + t2.c_int == 2) and (t1.c_int > 0 or t2.c_float >= 0)
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t1@dt=2014
-PREHOOK: Input: default@t2
-PREHOOK: Input: default@t2@dt=2014
-#### A masked pattern was here ####
-POSTHOOK: query: select t1.c_int, t2.c_int from t1 full outer join t2 on t1.key=t2.key where (t1.c_int + t2.c_int == 2) and (t1.c_int > 0 or t2.c_float >= 0)
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t1@dt=2014
-POSTHOOK: Input: default@t2
-POSTHOOK: Input: default@t2@dt=2014
-#### A masked pattern was here ####
[... run of identical deleted rows: "-1 1" ...]
-PREHOOK: query: select b, t1.c, t2.p, q, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or t2.q >= 0)
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t1@dt=2014
-PREHOOK: Input: default@t2
-PREHOOK: Input: default@t2@dt=2014
-PREHOOK: Input: default@t3
-#### A masked pattern was here ####
-POSTHOOK: query: select b, t1.c, t2.p, q, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or t2.q >= 0)
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t1@dt=2014
-POSTHOOK: Input: default@t2
-POSTHOOK: Input: default@t2@dt=2014
-POSTHOOK: Input: default@t3
-#### A masked pattern was here ####
[... run of identical deleted rows: "-1 1.0 1 1 1" ...]
-PREHOOK: query: select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 left outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t1@dt=2014
-PREHOOK: Input: default@t2
-PREHOOK: Input: default@t2@dt=2014
-PREHOOK: Input: default@t3
-#### A masked pattern was here ####
-POSTHOOK: query: select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 left outer join (select t2.key as p, t2.c_int as q, c_float as r from t2
where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0) -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -POSTHOOK: Input: default@t2 -POSTHOOK: Input: default@t2@dt=2014 -POSTHOOK: Input: default@t3 -#### A masked pattern was here #### -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 
1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 
1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -PREHOOK: query: select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 right outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0) -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -PREHOOK: Input: default@t2 -PREHOOK: Input: default@t2@dt=2014 -PREHOOK: Input: default@t3 -#### A masked pattern was here #### -POSTHOOK: query: select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 right outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0) -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -POSTHOOK: Input: default@t2 -POSTHOOK: Input: default@t2@dt=2014 -POSTHOOK: Input: default@t3 -#### A masked pattern was here #### -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 
-1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 
1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -PREHOOK: query: select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 full outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0) -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -PREHOOK: Input: default@t2 -PREHOOK: Input: default@t2@dt=2014 -PREHOOK: Input: default@t3 -#### A masked pattern was here #### -POSTHOOK: query: select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 full outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0) -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -POSTHOOK: Input: default@t2 -POSTHOOK: Input: default@t2@dt=2014 -POSTHOOK: Input: default@t3 -#### A masked pattern was here #### -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 
-1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 
1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -PREHOOK: query: select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 full outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0) -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -PREHOOK: Input: default@t2 -PREHOOK: Input: default@t2@dt=2014 -PREHOOK: Input: default@t3 -#### A masked pattern was here #### -POSTHOOK: query: select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 full outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0) -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -POSTHOOK: Input: default@t2 -POSTHOOK: Input: default@t2@dt=2014 -POSTHOOK: Input: default@t3 -#### A masked pattern was here #### -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 
1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 
1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -PREHOOK: query: select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 left outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on 
t1.a=p left outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0) -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -PREHOOK: Input: default@t2 -PREHOOK: Input: default@t2@dt=2014 -PREHOOK: Input: default@t3 -#### A masked pattern was here #### -POSTHOOK: query: select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 left outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p left outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0) -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -POSTHOOK: Input: default@t2 -POSTHOOK: Input: default@t2@dt=2014 -POSTHOOK: Input: default@t3 -#### A masked pattern was here #### -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 
-1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 
1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -PREHOOK: query: select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 left outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p right outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0) -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -PREHOOK: Input: default@t2 -PREHOOK: Input: default@t2@dt=2014 -PREHOOK: Input: default@t3 -#### A masked pattern was here #### -POSTHOOK: query: select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 left outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p right outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0) -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -POSTHOOK: Input: default@t2 -POSTHOOK: Input: default@t2@dt=2014 -POSTHOOK: Input: default@t3 -#### A masked pattern was here #### -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 
1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 
1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -PREHOOK: query: select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 left outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p full outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0) -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -PREHOOK: Input: default@t2 -PREHOOK: Input: default@t2@dt=2014 -PREHOOK: Input: default@t3 -#### A masked pattern was here #### -POSTHOOK: query: select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 left outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p full outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0) -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -POSTHOOK: Input: default@t2 -POSTHOOK: Input: default@t2@dt=2014 -POSTHOOK: Input: default@t3 -#### A masked pattern was here #### -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 
-1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 
1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -PREHOOK: query: select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 right outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p right outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0) 
-PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -PREHOOK: Input: default@t2 -PREHOOK: Input: default@t2@dt=2014 -PREHOOK: Input: default@t3 -#### A masked pattern was here #### -POSTHOOK: query: select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 right outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p right outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0) -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -POSTHOOK: Input: default@t2 -POSTHOOK: Input: default@t2@dt=2014 -POSTHOOK: Input: default@t3 -#### A masked pattern was here #### -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 
1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 
1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -PREHOOK: query: select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 right outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p left outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0) -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -PREHOOK: Input: default@t2 -PREHOOK: Input: default@t2@dt=2014 -PREHOOK: Input: default@t3 -#### A masked pattern was here #### -POSTHOOK: query: select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 right outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p left outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0) -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -POSTHOOK: Input: default@t2 -POSTHOOK: Input: default@t2@dt=2014 -POSTHOOK: Input: default@t3 -#### A masked pattern was here #### -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 
1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 
1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -PREHOOK: query: select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 right outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p full outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0) -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -PREHOOK: Input: default@t2 -PREHOOK: Input: default@t2@dt=2014 -PREHOOK: Input: default@t3 -#### A masked pattern was here #### -POSTHOOK: query: select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 right outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p full outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0) -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -POSTHOOK: Input: default@t2 -POSTHOOK: Input: default@t2@dt=2014 -POSTHOOK: Input: default@t3 -#### A masked pattern was here #### -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 
1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 
1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -PREHOOK: query: select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 full outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p full outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0) -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -PREHOOK: Input: default@t2 -PREHOOK: Input: 
default@t2@dt=2014 -PREHOOK: Input: default@t3 -#### A masked pattern was here #### -POSTHOOK: query: select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 full outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p full outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0) -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -POSTHOOK: Input: default@t2 -POSTHOOK: Input: default@t2@dt=2014 -POSTHOOK: Input: default@t3 -#### A masked pattern was here #### -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 
1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 
-1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -PREHOOK: query: select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 full outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p left outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0) -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -PREHOOK: Input: default@t2 -PREHOOK: Input: default@t2@dt=2014 -PREHOOK: Input: default@t3 -#### A masked pattern was here #### -POSTHOOK: query: select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 full outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p left outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0) -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -POSTHOOK: Input: default@t2 -POSTHOOK: Input: default@t2@dt=2014 -POSTHOOK: Input: default@t3 -#### A masked pattern was here #### -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 
1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 
1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -PREHOOK: query: select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 full outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p right outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0) -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -PREHOOK: Input: default@t2 -PREHOOK: Input: default@t2@dt=2014 -PREHOOK: Input: default@t3 -#### A masked pattern was here #### -POSTHOOK: query: select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 full outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p right outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0) -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -POSTHOOK: Input: default@t2 -POSTHOOK: Input: default@t2@dt=2014 -POSTHOOK: Input: default@t3 -#### A masked pattern was here #### -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 
1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 
-1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -PREHOOK: query: -- 6. Test Select + TS + Join + Fil + GB + GB Having -select * from t1 group by c_int -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: -- 6. 
Test Select + TS + Join + Fil + GB + GB Having -select * from t1 group by c_int -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -NULL -1 -PREHOOK: query: select key, (c_int+1)+2 as x, sum(c_int) from t1 group by c_float, t1.c_int, key -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: select key, (c_int+1)+2 as x, sum(c_int) from t1 group by c_float, t1.c_int, key -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -null NULL NULL - 1 4 2 - 1 4 2 -1 4 12 -1 4 2 -PREHOOK: query: select * from (select key, (c_int+c_float+1+2) as x, sum(c_int) as y from t1 group by c_float, t1.c_int, key) R group by y, x -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: select * from (select key, (c_int+c_float+1+2) as x, sum(c_int) as y from t1 group by c_float, t1.c_int, key) R group by y, x -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -NULL NULL -2 5.0 -12 5.0 -PREHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key order by a) t1 join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key order by q/10 desc, r asc) t2 on t1.a=p join t3 on t1.a=key where (b + t2.q >= 0) and (b > 0 or c_int >= 0) group by t3.c_int, c order by t3.c_int+c desc, c -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -PREHOOK: Input: default@t2 -PREHOOK: Input: default@t2@dt=2014 -PREHOOK: Input: default@t3 -#### A masked pattern was here #### -POSTHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key order by a) t1 join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key order by q/10 desc, r asc) t2 on t1.a=p join t3 on t1.a=key where (b + t2.q >= 0) and (b > 0 or c_int >= 0) group by t3.c_int, c order by t3.c_int+c desc, c -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -POSTHOOK: Input: default@t2 -POSTHOOK: Input: default@t2@dt=2014 -POSTHOOK: Input: default@t3 -#### A masked pattern was here #### -1 12 -1 2 -PREHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key having t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by b % c asc, b desc) t1 left outer join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key having t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0) t2 on t1.a=p left outer join t3 on t1.a=key where (b + t2.q >= 0) and (b > 0 or c_int >= 0) group by t3.c_int, c having t3.c_int > 0 and (c_int >=1 or c >= 1) and (c_int + c) >= 0 order by t3.c_int % c asc, t3.c_int desc -PREHOOK: type: QUERY -PREHOOK: Input: 
default@t1
-PREHOOK: Input: default@t1@dt=2014
-PREHOOK: Input: default@t2
-PREHOOK: Input: default@t2@dt=2014
-PREHOOK: Input: default@t3
-#### A masked pattern was here ####
-POSTHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key having t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by b % c asc, b desc) t1 left outer join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key having t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0) t2 on t1.a=p left outer join t3 on t1.a=key where (b + t2.q >= 0) and (b > 0 or c_int >= 0) group by t3.c_int, c having t3.c_int > 0 and (c_int >=1 or c >= 1) and (c_int + c) >= 0 order by t3.c_int % c asc, t3.c_int desc
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t1@dt=2014
-POSTHOOK: Input: default@t2
-POSTHOOK: Input: default@t2@dt=2014
-POSTHOOK: Input: default@t3
-#### A masked pattern was here ####
-1 12
-1 2
-PREHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key having t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by b+c, a desc) t1 right outer join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key having t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0) t2 on t1.a=p right outer join t3 on t1.a=key where (b + t2.q >= 2) and (b > 0 or c_int >= 0) group by t3.c_int, c
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t1@dt=2014
-PREHOOK: Input: default@t2
-PREHOOK: Input: default@t2@dt=2014
-PREHOOK: Input: default@t3
-#### A masked pattern was here ####
-POSTHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key having t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by b+c, a desc) t1 right outer join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key having t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0) t2 on t1.a=p right outer join t3 on t1.a=key where (b + t2.q >= 2) and (b > 0 or c_int >= 0) group by t3.c_int, c
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t1@dt=2014
-POSTHOOK: Input: default@t2
-POSTHOOK: Input: default@t2@dt=2014
-POSTHOOK: Input: default@t3
-#### A masked pattern was here ####
-1 2
-1 12
-PREHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key having t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by c+a desc) t1 full outer join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key having t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by p+q desc, r asc) t2 on t1.a=p full outer join t3 on t1.a=key where (b + t2.q >= 0) and (b > 0 or c_int >= 0) group by t3.c_int, c having t3.c_int > 0 and (c_int >=1 or c >= 1) and (c_int + c) >= 0 order by t3.c_int
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t1@dt=2014
-PREHOOK: Input: default@t2
-PREHOOK: Input: default@t2@dt=2014
-PREHOOK: Input: default@t3
-#### A masked pattern was here ####
-POSTHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key having t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by c+a desc) t1 full outer join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key having t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by p+q desc, r asc) t2 on t1.a=p full outer join t3 on t1.a=key where (b + t2.q >= 0) and (b > 0 or c_int >= 0) group by t3.c_int, c having t3.c_int > 0 and (c_int >=1 or c >= 1) and (c_int + c) >= 0 order by t3.c_int
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t1@dt=2014
-POSTHOOK: Input: default@t2
-POSTHOOK: Input: default@t2@dt=2014
-POSTHOOK: Input: default@t3
-#### A masked pattern was here ####
-1 12
-1 2
-PREHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key having t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0) t1 join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key having t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0) t2 on t1.a=p join t3 on t1.a=key where (b + t2.q >= 0) and (b > 0 or c_int >= 0) group by t3.c_int, c
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t1@dt=2014
-PREHOOK: Input: default@t2
-PREHOOK: Input: default@t2@dt=2014
-PREHOOK: Input: default@t3
-#### A masked pattern was here ####
-POSTHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key having t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0) t1 join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key having t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0) t2 on t1.a=p join t3 on t1.a=key where (b + t2.q >= 0) and (b > 0 or c_int >= 0) group by t3.c_int, c
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t1@dt=2014
-POSTHOOK: Input: default@t2
-POSTHOOK: Input: default@t2@dt=2014
-POSTHOOK: Input: default@t3
-#### A masked pattern was here ####
-1 2
-1 12
-PREHOOK: query: -- 7. Test Select + TS + Join + Fil + GB + GB Having + Limit
-select * from t1 group by c_int limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t1@dt=2014
-#### A masked pattern was here ####
-POSTHOOK: query: -- 7. Test Select + TS + Join + Fil + GB + GB Having + Limit
-select * from t1 group by c_int limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t1@dt=2014
-#### A masked pattern was here ####
-NULL
-PREHOOK: query: select key, (c_int+1)+2 as x, sum(c_int) from t1 group by c_float, t1.c_int, key order by x limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t1@dt=2014
-#### A masked pattern was here ####
-POSTHOOK: query: select key, (c_int+1)+2 as x, sum(c_int) from t1 group by c_float, t1.c_int, key order by x limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t1@dt=2014
-#### A masked pattern was here ####
-null NULL NULL
-PREHOOK: query: select * from (select key, (c_int+c_float+1+2) as x, sum(c_int) as y from t1 group by c_float, t1.c_int, key) R group by y, x order by x,y limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t1@dt=2014
-#### A masked pattern was here ####
-POSTHOOK: query: select * from (select key, (c_int+c_float+1+2) as x, sum(c_int) as y from t1 group by c_float, t1.c_int, key) R group by y, x order by x,y limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t1@dt=2014
-#### A masked pattern was here ####
-NULL NULL
-PREHOOK: query: select key from(select key from (select key from t1 limit 5)t2 limit 5)t3 limit 5
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t1@dt=2014
-#### A masked pattern was here ####
-POSTHOOK: query: select key from(select key from (select key from t1 limit 5)t2 limit 5)t3 limit 5
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t1@dt=2014
-#### A masked pattern was here ####
-1
-1
-1
-1
-1
-PREHOOK: query: select key, c_int from(select key, c_int from (select key, c_int from t1 order by c_int limit 5)t1 order by c_int limit 5)t2 order by c_int limit 5
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t1@dt=2014
-#### A masked pattern was here ####
-POSTHOOK: query: select key, c_int from(select key, c_int from (select key, c_int from t1 order by c_int limit 5)t1 order by c_int limit 5)t2 order by c_int limit 5
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t1@dt=2014
-#### A masked pattern was here ####
-null NULL
-null NULL
-1 1
-1 1
-1 1
-PREHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key order by a limit 5) t1 join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key order by q/10 desc, r asc limit 5) t2 on t1.a=p join t3 on t1.a=key where (b + t2.q >= 0) and (b > 0 or c_int >= 0) group by t3.c_int, c order by t3.c_int+c desc, c limit 5
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t1@dt=2014
-PREHOOK: Input: default@t2
-PREHOOK: Input: default@t2@dt=2014
-PREHOOK: Input: default@t3
-#### A masked pattern was here ####
-POSTHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key order by a limit 5) t1 join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key order by q/10 desc, r asc limit 5) t2 on 
t1.a=p join t3 on t1.a=key where (b + t2.q >= 0) and (b > 0 or c_int >= 0) group by t3.c_int, c order by t3.c_int+c desc, c limit 5
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t1@dt=2014
-POSTHOOK: Input: default@t2
-POSTHOOK: Input: default@t2@dt=2014
-POSTHOOK: Input: default@t3
-#### A masked pattern was here ####
-1 12
-1 2
-PREHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key having t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by b % c asc, b desc limit 5) t1 left outer join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key having t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 limit 5) t2 on t1.a=p left outer join t3 on t1.a=key where (b + t2.q >= 0) and (b > 0 or c_int >= 0) group by t3.c_int, c having t3.c_int > 0 and (c_int >=1 or c >= 1) and (c_int + c) >= 0 order by t3.c_int % c asc, t3.c_int desc limit 5
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t1@dt=2014
-PREHOOK: Input: default@t2
-PREHOOK: Input: default@t2@dt=2014
-PREHOOK: Input: default@t3
-#### A masked pattern was here ####
-POSTHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key having t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by b % c asc, b desc limit 5) t1 left outer join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key having t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 limit 5) t2 on t1.a=p left outer join t3 on t1.a=key where (b + t2.q >= 0) and (b > 0 or c_int >= 0) group by t3.c_int, c having t3.c_int > 0 and (c_int >=1 or c >= 1) and (c_int + c) >= 0 order by t3.c_int % c asc, t3.c_int desc limit 5
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t1@dt=2014
-POSTHOOK: Input: default@t2
-POSTHOOK: Input: default@t2@dt=2014
-POSTHOOK: Input: default@t3
-#### A masked pattern was here ####
-1 12
-1 2
-PREHOOK: query: -- 8. Test UDF/UDAF
-select count(*), count(c_int), sum(c_int), avg(c_int), max(c_int), min(c_int) from t1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t1@dt=2014
-#### A masked pattern was here ####
-POSTHOOK: query: -- 8. Test UDF/UDAF
-select count(*), count(c_int), sum(c_int), avg(c_int), max(c_int), min(c_int) from t1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t1@dt=2014
-#### A masked pattern was here ####
-20 18 18 1.0 1 1
-PREHOOK: query: select count(*), count(c_int), sum(c_int), avg(c_int), max(c_int), min(c_int), case c_int when 0 then 1 when 1 then 2 else 3 end, sum(case c_int when 0 then 1 when 1 then 2 else 3 end) from t1 group by c_int
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t1@dt=2014
-#### A masked pattern was here ####
-POSTHOOK: query: select count(*), count(c_int), sum(c_int), avg(c_int), max(c_int), min(c_int), case c_int when 0 then 1 when 1 then 2 else 3 end, sum(case c_int when 0 then 1 when 1 then 2 else 3 end) from t1 group by c_int
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t1@dt=2014
-#### A masked pattern was here ####
-2 0 NULL NULL NULL NULL 3 6
-18 18 18 1.0 1 1 2 36
-PREHOOK: query: select * from (select count(*) as a, count(distinct c_int) as b, sum(c_int) as c, avg(c_int) as d, max(c_int) as e, min(c_int) as f from t1) t1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t1@dt=2014
-#### A masked pattern was here ####
-POSTHOOK: query: select * from (select count(*) as a, count(distinct c_int) as b, sum(c_int) as c, avg(c_int) as d, max(c_int) as e, min(c_int) as f from t1) t1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t1@dt=2014
-#### A masked pattern was here ####
-20 1 18 1.0 1 1
-PREHOOK: query: select * from (select count(*) as a, count(distinct c_int) as b, sum(c_int) as c, avg(c_int) as d, max(c_int) as e, min(c_int) as f, case c_int when 0 then 1 when 1 then 2 else 3 end as g, sum(case c_int when 0 then 1 when 1 then 2 else 3 end) as h from t1 group by c_int) t1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t1@dt=2014
-#### A masked pattern was here ####
-POSTHOOK: query: select * from (select count(*) as a, count(distinct c_int) as b, sum(c_int) as c, avg(c_int) as d, max(c_int) as e, min(c_int) as f, case c_int when 0 then 1 when 1 then 2 else 3 end as g, sum(case c_int when 0 then 1 when 1 then 2 else 3 end) as h from t1 group by c_int) t1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t1@dt=2014
-#### A masked pattern was here ####
-2 0 NULL NULL NULL NULL 3 6
-18 1 18 1.0 1 1 2 36
-PREHOOK: query: select f,a,e,b from (select count(*) as a, count(c_int) as b, sum(c_int) as c, avg(c_int) as d, max(c_int) as e, min(c_int) as f from t1) t1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t1@dt=2014
-#### A masked pattern was here ####
-POSTHOOK: query: select f,a,e,b from (select count(*) as a, count(c_int) as b, sum(c_int) as c, avg(c_int) as d, max(c_int) as e, min(c_int) as f from t1) t1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t1@dt=2014
-#### A masked pattern was here ####
-1 20 1 18
-PREHOOK: query: select f,a,e,b from (select count(*) as a, count(distinct c_int) as b, sum(distinct c_int) as c, avg(distinct c_int) as d, max(distinct c_int) as e, min(distinct c_int) as f from t1) t1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t1@dt=2014
-#### A masked pattern was here ####
-POSTHOOK: query: select f,a,e,b from (select count(*) as a, count(distinct c_int) as b, sum(distinct c_int) as c, avg(distinct c_int) as d, max(distinct c_int) as e, min(distinct c_int) as f from t1) t1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t1@dt=2014
-#### A masked pattern was here ####
-1 20 1 1
-PREHOOK: query: select count(c_int) as a, avg(c_float), key from t1 group by key
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t1@dt=2014
-#### A masked pattern was here ####
-POSTHOOK: query: select count(c_int) as a, avg(c_float), key from t1 group by key
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t1@dt=2014
-#### A masked pattern was here ####
-2 1.0 1
-2 1.0 1
-12 1.0 1
-2 1.0 1
-0 NULL null
-PREHOOK: query: select count(distinct c_int) as a, avg(c_float) from t1 group by c_float
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t1@dt=2014
-#### A masked pattern was here ####
-POSTHOOK: query: select count(distinct c_int) as a, avg(c_float) from t1 group by c_float
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t1@dt=2014
-#### A masked pattern was here ####
-0 NULL
-1 1.0
-PREHOOK: query: select count(distinct c_int) as a, avg(c_float) from t1 group by c_int
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t1@dt=2014
-#### A masked pattern was here ####
-POSTHOOK: query: select count(distinct c_int) as a, avg(c_float) from t1 group by c_int
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t1@dt=2014
-#### A masked pattern was here ####
-0 NULL
-1 1.0
-PREHOOK: query: select count(distinct c_int) as a, avg(c_float) from t1 group by c_float, c_int
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t1@dt=2014
-#### A masked pattern was here ####
-POSTHOOK: query: select count(distinct c_int) as a, avg(c_float) from t1 group by c_float, c_int
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t1@dt=2014
-#### A masked pattern was here ####
-0 NULL
-1 1.0
-PREHOOK: query: -- 9. Test Windowing Functions
-select count(c_int) over() from t1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t1@dt=2014
-#### A masked pattern was here ####
-POSTHOOK: query: -- 9. 
Test Windowing Functions -select count(c_int) over() from t1 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -18 -18 -18 -18 -18 -18 -18 -18 -18 -18 -18 -18 -18 -18 -18 -18 -18 -18 -18 -18 -PREHOOK: query: select count(c_int) over(), sum(c_float) over(), max(c_int) over(), min(c_int) over(), row_number() over(), rank() over(), dense_rank() over(), percent_rank() over(), lead(c_int, 2, c_int) over(), lag(c_float, 2, c_float) over() from t1 -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: select count(c_int) over(), sum(c_float) over(), max(c_int) over(), min(c_int) over(), row_number() over(), rank() over(), dense_rank() over(), percent_rank() over(), lead(c_int, 2, c_int) over(), lag(c_float, 2, c_float) over() from t1 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -18 18.0 1 1 1 1 1 0.0 1 NULL -18 18.0 1 1 2 1 1 0.0 1 NULL -18 18.0 1 1 3 1 1 0.0 1 NULL -18 18.0 1 1 4 1 1 0.0 1 NULL -18 18.0 1 1 5 1 1 0.0 1 1.0 -18 18.0 1 1 6 1 1 0.0 1 1.0 -18 18.0 1 1 7 1 1 0.0 1 1.0 -18 18.0 1 1 8 1 1 0.0 1 1.0 -18 18.0 1 1 9 1 1 0.0 1 1.0 -18 18.0 1 1 10 1 1 0.0 1 1.0 -18 18.0 1 1 11 1 1 0.0 1 1.0 -18 18.0 1 1 12 1 1 0.0 1 1.0 -18 18.0 1 1 13 1 1 0.0 1 1.0 -18 18.0 1 1 14 1 1 0.0 1 1.0 -18 18.0 1 1 15 1 1 0.0 1 1.0 -18 18.0 1 1 16 1 1 0.0 1 1.0 -18 18.0 1 1 17 1 1 0.0 1 1.0 -18 18.0 1 1 18 1 1 0.0 1 1.0 -18 18.0 1 1 19 1 1 0.0 1 1.0 -18 18.0 1 1 20 1 1 0.0 1 1.0 -PREHOOK: query: select * from (select count(c_int) over(), sum(c_float) over(), max(c_int) over(), min(c_int) over(), row_number() over(), rank() over(), dense_rank() over(), percent_rank() over(), lead(c_int, 2, c_int) over(), lag(c_float, 2, c_float) over() from t1) t1 -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: select * from (select count(c_int) over(), sum(c_float) over(), max(c_int) over(), min(c_int) over(), row_number() over(), rank() over(), dense_rank() over(), percent_rank() over(), lead(c_int, 2, c_int) over(), lag(c_float, 2, c_float) over() from t1) t1 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -18 18.0 1 1 1 1 1 0.0 1 NULL -18 18.0 1 1 2 1 1 0.0 1 NULL -18 18.0 1 1 3 1 1 0.0 1 NULL -18 18.0 1 1 4 1 1 0.0 1 NULL -18 18.0 1 1 5 1 1 0.0 1 1.0 -18 18.0 1 1 6 1 1 0.0 1 1.0 -18 18.0 1 1 7 1 1 0.0 1 1.0 -18 18.0 1 1 8 1 1 0.0 1 1.0 -18 18.0 1 1 9 1 1 0.0 1 1.0 -18 18.0 1 1 10 1 1 0.0 1 1.0 -18 18.0 1 1 11 1 1 0.0 1 1.0 -18 18.0 1 1 12 1 1 0.0 1 1.0 -18 18.0 1 1 13 1 1 0.0 1 1.0 -18 18.0 1 1 14 1 1 0.0 1 1.0 -18 18.0 1 1 15 1 1 0.0 1 1.0 -18 18.0 1 1 16 1 1 0.0 1 1.0 -18 18.0 1 1 17 1 1 0.0 1 1.0 -18 18.0 1 1 18 1 1 0.0 1 1.0 -18 18.0 1 1 19 1 1 0.0 1 1.0 -18 18.0 1 1 20 1 1 0.0 1 1.0 -PREHOOK: query: select x from (select count(c_int) over() as x, sum(c_float) over() from t1) t1 -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: select x from (select count(c_int) over() as x, sum(c_float) over() from t1) t1 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -18 -18 -18 -18 -18 -18 -18 -18 -18 -18 -18 -18 -18 -18 -18 -18 -18 -18 -18 -18 -PREHOOK: query: 
select 1+sum(c_int) over() from t1 -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: select 1+sum(c_int) over() from t1 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -19 -19 -19 -19 -19 -19 -19 -19 -19 -19 -19 -19 -19 -19 -19 -19 -19 -19 -19 -19 -PREHOOK: query: select sum(c_int)+sum(sum(c_int)) over() from t1 -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: select sum(c_int)+sum(sum(c_int)) over() from t1 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -36 -PREHOOK: query: select * from (select max(c_int) over (partition by key order by value Rows UNBOUNDED PRECEDING), min(c_int) over (partition by key order by value rows current row), count(c_int) over(partition by key order by value ROWS 1 PRECEDING), avg(value) over (partition by key order by value Rows between unbounded preceding and unbounded following), sum(value) over (partition by key order by value rows between unbounded preceding and current row), avg(c_float) over (partition by key order by value Rows between 1 preceding and unbounded following), sum(c_float) over (partition by key order by value rows between 1 preceding and current row), max(c_float) over (partition by key order by value rows between 1 preceding and unbounded following), min(c_float) over (partition by key order by value rows between 1 preceding and 1 following) from t1) t1 -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: select * from (select max(c_int) over (partition by key order by value Rows UNBOUNDED PRECEDING), min(c_int) over (partition by key order by value rows current row), count(c_int) over(partition by key order by value ROWS 1 PRECEDING), avg(value) over (partition by key order by value Rows between unbounded preceding and unbounded following), sum(value) over (partition by key order by value rows between unbounded preceding and current row), avg(c_float) over (partition by key order by value Rows between 1 preceding and unbounded following), sum(c_float) over (partition by key order by value rows between 1 preceding and current row), max(c_float) over (partition by key order by value rows between 1 preceding and unbounded following), min(c_float) over (partition by key order by value rows between 1 preceding and 1 following) from t1) t1 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -1 1 1 1.0 1.0 1.0 1.0 1.0 1.0 -1 1 2 1.0 2.0 1.0 2.0 1.0 1.0 -1 1 1 1.0 1.0 1.0 1.0 1.0 1.0 -1 1 2 1.0 2.0 1.0 2.0 1.0 1.0 -1 1 1 1.0 1.0 1.0 1.0 1.0 1.0 -1 1 2 1.0 2.0 1.0 2.0 1.0 1.0 -1 1 2 1.0 3.0 1.0 2.0 1.0 1.0 -1 1 2 1.0 4.0 1.0 2.0 1.0 1.0 -1 1 2 1.0 5.0 1.0 2.0 1.0 1.0 -1 1 2 1.0 6.0 1.0 2.0 1.0 1.0 -1 1 2 1.0 7.0 1.0 2.0 1.0 1.0 -1 1 2 1.0 8.0 1.0 2.0 1.0 1.0 -1 1 2 1.0 9.0 1.0 2.0 1.0 1.0 -1 1 2 1.0 10.0 1.0 2.0 1.0 1.0 -1 1 2 1.0 11.0 1.0 2.0 1.0 1.0 -1 1 2 1.0 12.0 1.0 2.0 1.0 1.0 -1 1 1 1.0 1.0 1.0 1.0 1.0 1.0 -1 1 2 1.0 2.0 1.0 2.0 1.0 1.0 -NULL NULL 0 NULL 0.0 NULL NULL NULL NULL -NULL NULL 0 NULL 0.0 NULL NULL NULL NULL -PREHOOK: query: select i, a, h, b, c, d, e, f, g, a as x, a +1 as y from (select max(c_int) over (partition by key order by value 
range UNBOUNDED PRECEDING) a, min(c_int) over (partition by key order by value range current row) b, count(c_int) over(partition by key order by value range 1 PRECEDING) c, avg(value) over (partition by key order by value range between unbounded preceding and unbounded following) d, sum(value) over (partition by key order by value range between unbounded preceding and current row) e, avg(c_float) over (partition by key order by value range between 1 preceding and unbounded following) f, sum(c_float) over (partition by key order by value range between 1 preceding and current row) g, max(c_float) over (partition by key order by value range between 1 preceding and unbounded following) h, min(c_float) over (partition by key order by value range between 1 preceding and 1 following) i from t1) t1 -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: select i, a, h, b, c, d, e, f, g, a as x, a +1 as y from (select max(c_int) over (partition by key order by value range UNBOUNDED PRECEDING) a, min(c_int) over (partition by key order by value range current row) b, count(c_int) over(partition by key order by value range 1 PRECEDING) c, avg(value) over (partition by key order by value range between unbounded preceding and unbounded following) d, sum(value) over (partition by key order by value range between unbounded preceding and current row) e, avg(c_float) over (partition by key order by value range between 1 preceding and unbounded following) f, sum(c_float) over (partition by key order by value range between 1 preceding and current row) g, max(c_float) over (partition by key order by value range between 1 preceding and unbounded following) h, min(c_float) over (partition by key order by value range between 1 preceding and 1 following) i from t1) t1 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -1.0 1 1.0 1 2 1.0 2.0 1.0 2.0 1 2 -1.0 1 1.0 1 2 1.0 2.0 1.0 2.0 1 2 -1.0 1 1.0 1 2 1.0 2.0 1.0 2.0 1 2 -1.0 1 1.0 1 2 1.0 2.0 1.0 2.0 1 2 -1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2 -1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2 -1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2 -1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2 -1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2 -1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2 -1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2 -1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2 -1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2 -1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2 -1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2 -1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2 -1.0 1 1.0 1 2 1.0 2.0 1.0 2.0 1 2 -1.0 1 1.0 1 2 1.0 2.0 1.0 2.0 1 2 -NULL NULL NULL NULL 0 NULL 0.0 NULL NULL NULL NULL -NULL NULL NULL NULL 0 NULL 0.0 NULL NULL NULL NULL -PREHOOK: query: -- 10. Test views -create view v1 as select c_int, value, c_boolean, dt from t1 -PREHOOK: type: CREATEVIEW -PREHOOK: Input: default@t1 -PREHOOK: Output: database:default -PREHOOK: Output: default@v1 -POSTHOOK: query: -- 10. 
Test views
-create view v1 as select c_int, value, c_boolean, dt from t1
-POSTHOOK: type: CREATEVIEW
-POSTHOOK: Input: default@t1
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@v1
-PREHOOK: query: create view v2 as select c_int, value from t2
-PREHOOK: type: CREATEVIEW
-PREHOOK: Input: default@t2
-PREHOOK: Output: database:default
-PREHOOK: Output: default@v2
-POSTHOOK: query: create view v2 as select c_int, value from t2
-POSTHOOK: type: CREATEVIEW
-POSTHOOK: Input: default@t2
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@v2
-PREHOOK: query: select value from v1 where c_boolean=false
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t1@dt=2014
-PREHOOK: Input: default@v1
-#### A masked pattern was here ####
-POSTHOOK: query: select value from v1 where c_boolean=false
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t1@dt=2014
-POSTHOOK: Input: default@v1
-#### A masked pattern was here ####
-1
-1
-PREHOOK: query: select max(c_int) from v1 group by (c_boolean)
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t1@dt=2014
-PREHOOK: Input: default@v1
-#### A masked pattern was here ####
-POSTHOOK: query: select max(c_int) from v1 group by (c_boolean)
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t1@dt=2014
-POSTHOOK: Input: default@v1
-#### A masked pattern was here ####
-NULL
-1
-1
-PREHOOK: query: select count(v1.c_int) from v1 join t2 on v1.c_int = t2.c_int
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t1@dt=2014
-PREHOOK: Input: default@t2
-PREHOOK: Input: default@t2@dt=2014
-PREHOOK: Input: default@v1
-#### A masked pattern was here ####
-POSTHOOK: query: select count(v1.c_int) from v1 join t2 on v1.c_int = t2.c_int
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t1@dt=2014
-POSTHOOK: Input: default@t2
-POSTHOOK: Input: default@t2@dt=2014
-POSTHOOK: Input: default@v1
-#### A masked pattern was here ####
-234
-PREHOOK: query: select count(v1.c_int) from v1 join v2 on v1.c_int = v2.c_int
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t1@dt=2014
-PREHOOK: Input: default@t2
-PREHOOK: Input: default@t2@dt=2014
-PREHOOK: Input: default@v1
-PREHOOK: Input: default@v2
-#### A masked pattern was here ####
-POSTHOOK: query: select count(v1.c_int) from v1 join v2 on v1.c_int = v2.c_int
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t1@dt=2014
-POSTHOOK: Input: default@t2
-POSTHOOK: Input: default@t2@dt=2014
-POSTHOOK: Input: default@v1
-POSTHOOK: Input: default@v2
-#### A masked pattern was here ####
-234
-PREHOOK: query: select count(*) from v1 a join v1 b on a.value = b.value
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t1@dt=2014
-PREHOOK: Input: default@v1
-#### A masked pattern was here ####
-POSTHOOK: query: select count(*) from v1 a join v1 b on a.value = b.value
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t1@dt=2014
-POSTHOOK: Input: default@v1
-#### A masked pattern was here ####
-160
-PREHOOK: query: create view v3 as select v1.value val from v1 join t1 on v1.c_boolean = t1.c_boolean
-PREHOOK: type: CREATEVIEW
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@v1
-PREHOOK: Output: database:default
-PREHOOK: Output: default@v3
-POSTHOOK: query: create view v3 as select v1.value val from v1 join t1 on v1.c_boolean = t1.c_boolean
-POSTHOOK: type: CREATEVIEW
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@v1
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@v3
-PREHOOK: query: select count(val) from v3 where val != '1'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t1@dt=2014
-PREHOOK: Input: default@v1
-PREHOOK: Input: default@v3
-#### A masked pattern was here ####
-POSTHOOK: query: select count(val) from v3 where val != '1'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t1@dt=2014
-POSTHOOK: Input: default@v1
-POSTHOOK: Input: default@v3
-#### A masked pattern was here ####
-96
-PREHOOK: query: with q1 as ( select key from t1 where key = '1')
-select count(*) from q1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t1@dt=2014
-#### A masked pattern was here ####
-POSTHOOK: query: with q1 as ( select key from t1 where key = '1')
-select count(*) from q1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t1@dt=2014
-#### A masked pattern was here ####
-12
-PREHOOK: query: with q1 as ( select value from v1 where c_boolean = false)
-select count(value) from q1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t1@dt=2014
-PREHOOK: Input: default@v1
-#### A masked pattern was here ####
-POSTHOOK: query: with q1 as ( select value from v1 where c_boolean = false)
-select count(value) from q1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t1@dt=2014
-POSTHOOK: Input: default@v1
-#### A masked pattern was here ####
-2
-PREHOOK: query: create view v4 as
-with q1 as ( select key,c_int from t1 where key = '1')
-select * from q1
-PREHOOK: type: CREATEVIEW
-PREHOOK: Input: default@t1
-PREHOOK: Output: database:default
-PREHOOK: Output: default@v4
-POSTHOOK: query: create view v4 as
-with q1 as ( select key,c_int from t1 where key = '1')
-select * from q1
-POSTHOOK: type: CREATEVIEW
-POSTHOOK: Input: default@t1
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@v4
-PREHOOK: query: with q1 as ( select c_int from q2 where c_boolean = false),
-q2 as ( select c_int,c_boolean from v1 where value = '1')
-select sum(c_int) from (select c_int from q1) a
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t1@dt=2014
-PREHOOK: Input: default@v1
-#### A masked pattern was here ####
-POSTHOOK: query: with q1 as ( select c_int from q2 where c_boolean = false),
-q2 as ( select c_int,c_boolean from v1 where value = '1')
-select sum(c_int) from (select c_int from q1) a
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t1@dt=2014
-POSTHOOK: Input: default@v1
-#### A masked pattern was here ####
-2
-PREHOOK: query: with q1 as ( select t1.c_int c_int from q2 join t1 where q2.c_int = t1.c_int and t1.dt='2014'),
-q2 as ( select c_int,c_boolean from v1 where value = '1' or dt = '14')
-select count(*) from q1 join q2 join v4 on q1.c_int = q2.c_int and v4.c_int = q2.c_int
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t1@dt=2014
-PREHOOK: Input: default@v1
-PREHOOK: Input: default@v4
-#### A masked pattern was here ####
-POSTHOOK: query: with q1 as ( select t1.c_int c_int from q2 join t1 where q2.c_int = t1.c_int and t1.dt='2014'),
-q2 as ( select c_int,c_boolean from v1 where value = '1' or dt = '14')
-select count(*) from q1 join q2 join v4 on q1.c_int = q2.c_int and v4.c_int = q2.c_int
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t1@dt=2014
-POSTHOOK: Input: default@v1
-POSTHOOK: 
Input: default@v4 -#### A masked pattern was here #### -31104 -PREHOOK: query: drop view v1 -PREHOOK: type: DROPVIEW -PREHOOK: Input: default@v1 -PREHOOK: Output: default@v1 -POSTHOOK: query: drop view v1 -POSTHOOK: type: DROPVIEW -POSTHOOK: Input: default@v1 -POSTHOOK: Output: default@v1 -PREHOOK: query: drop view v2 -PREHOOK: type: DROPVIEW -PREHOOK: Input: default@v2 -PREHOOK: Output: default@v2 -POSTHOOK: query: drop view v2 -POSTHOOK: type: DROPVIEW -POSTHOOK: Input: default@v2 -POSTHOOK: Output: default@v2 -PREHOOK: query: drop view v3 -PREHOOK: type: DROPVIEW -PREHOOK: Input: default@v3 -PREHOOK: Output: default@v3 -POSTHOOK: query: drop view v3 -POSTHOOK: type: DROPVIEW -POSTHOOK: Input: default@v3 -POSTHOOK: Output: default@v3 -PREHOOK: query: drop view v4 -PREHOOK: type: DROPVIEW -PREHOOK: Input: default@v4 -PREHOOK: Output: default@v4 -POSTHOOK: query: drop view v4 -POSTHOOK: type: DROPVIEW -POSTHOOK: Input: default@v4 -POSTHOOK: Output: default@v4 -PREHOOK: query: -- 11. Union All -select * from t1 union all select * from t2 order by key, c_boolean, value, dt -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -PREHOOK: Input: default@t2 -PREHOOK: Input: default@t2@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: -- 11. Union All -select * from t1 union all select * from t2 order by key, c_boolean, value, dt -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -POSTHOOK: Input: default@t2 -POSTHOOK: Input: default@t2@dt=2014 -#### A masked pattern was here #### - 1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 -1 1 1 1.0 false 2014 -1 1 1 1.0 false 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -2 2 2 2.0 true 2014 -2 2 2 2.0 true 2014 -2 2 2 2.0 true 2014 -2 2 2 2.0 true 2014 -2 2 2 2.0 true 2014 -null null NULL NULL NULL 2014 -null null NULL NULL NULL 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 false 2014 -1 1 1 1.0 false 2014 -null null NULL NULL NULL 2014 -null null NULL NULL NULL 2014 -PREHOOK: query: select key from (select key, c_int from (select * from t1 union all select * from t2 where t2.key >=0)r1 union all select key, c_int from t3)r2 where key >=0 order by key -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -PREHOOK: Input: default@t2 -PREHOOK: Input: default@t2@dt=2014 -PREHOOK: Input: default@t3 -#### A masked pattern was here #### -POSTHOOK: query: select key from (select key, c_int from (select * from t1 union all select * from t2 where t2.key >=0)r1 union all select key, c_int from t3)r2 where key >=0 order by key -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -POSTHOOK: Input: default@t2 -POSTHOOK: Input: default@t2@dt=2014 -POSTHOOK: Input: default@t3 -#### A masked pattern was here #### - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -2 -2 -2 -2 -2 -2 -2 -2 -3 -3 -3 -PREHOOK: query: select r2.key from (select key, c_int from (select key, c_int from t1 union 
all select key, c_int from t3 )r1 union all select key, c_int from t3)r2 join (select key, c_int from (select * from t1 union all select * from t2 where t2.key >=0)r1 union all select key, c_int from t3)r3 on r2.key=r3.key where r3.key >=0 order by r2.key -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -PREHOOK: Input: default@t2 -PREHOOK: Input: default@t2@dt=2014 -PREHOOK: Input: default@t3 -#### A masked pattern was here #### -POSTHOOK: query: select r2.key from (select key, c_int from (select key, c_int from t1 union all select key, c_int from t3 )r1 union all select key, c_int from t3)r2 join (select key, c_int from (select * from t1 union all select * from t2 where t2.key >=0)r1 union all select key, c_int from t3)r3 on r2.key=r3.key where r3.key >=0 order by r2.key -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -POSTHOOK: Input: default@t2 -POSTHOOK: Input: default@t2@dt=2014 -POSTHOOK: Input: default@t3 -#### A masked pattern was here #### - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -PREHOOK: query: -- 12. 
SemiJoin -select t1.c_int from t1 left semi join t2 on t1.key=t2.key -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -PREHOOK: Input: default@t2 -PREHOOK: Input: default@t2@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: -- 12. SemiJoin -select t1.c_int from t1 left semi join t2 on t1.key=t2.key -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -POSTHOOK: Input: default@t2 -POSTHOOK: Input: default@t2@dt=2014 -#### A masked pattern was here #### -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -NULL -NULL -PREHOOK: query: select t1.c_int from t1 left semi join t2 on t1.key=t2.key where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0) -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -PREHOOK: Input: default@t2 -PREHOOK: Input: default@t2@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: select t1.c_int from t1 left semi join t2 on t1.key=t2.key where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0) -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -POSTHOOK: Input: default@t2 -POSTHOOK: Input: default@t2@dt=2014 -#### A masked pattern was here #### -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -PREHOOK: query: select * from (select c, b, a from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 left semi join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p left semi join t3 on t1.a=key where (b + 1 == 2) and (b > 0 or c >= 0)) R where (b + 1 = 2) and (R.b > 0 or c >= 0) -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -PREHOOK: Input: default@t2 -PREHOOK: Input: default@t2@dt=2014 -PREHOOK: Input: default@t3 -#### A masked pattern was here #### -POSTHOOK: query: select * from (select c, b, a from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 left semi join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p left semi join t3 on t1.a=key where (b + 1 == 2) and (b > 0 or c >= 0)) R where (b + 1 = 2) and (R.b > 0 or c >= 0) -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -POSTHOOK: Input: default@t2 -POSTHOOK: Input: default@t2@dt=2014 -POSTHOOK: Input: default@t3 -#### A masked pattern was here #### -1.0 1 1 -1.0 1 1 -1.0 1 1 -1.0 1 1 -1.0 1 1 -1.0 1 1 -1.0 1 1 -1.0 1 1 -1.0 1 1 -1.0 1 1 -1.0 1 1 -1.0 1 1 -1.0 1 1 -1.0 1 1 -1.0 1 1 -1.0 1 1 -1.0 1 1 -1.0 1 1 -PREHOOK: query: select * from (select t3.c_int, t1.c, b from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 = 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 left semi join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p left outer join t3 on t1.a=key where (b + t3.c_int == 2) and (b > 0 or c_int >= 0)) R where (R.c_int + 1 = 2) and (R.b > 0 or c_int >= 0) -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -PREHOOK: Input: default@t2 -PREHOOK: Input: default@t2@dt=2014 -PREHOOK: Input: default@t3 -#### A masked pattern was here #### -POSTHOOK: query: select * from (select t3.c_int, t1.c, b from 
(select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 = 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 left semi join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p left outer join t3 on t1.a=key where (b + t3.c_int == 2) and (b > 0 or c_int >= 0)) R where (R.c_int + 1 = 2) and (R.b > 0 or c_int >= 0) -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -POSTHOOK: Input: default@t2 -POSTHOOK: Input: default@t2@dt=2014 -POSTHOOK: Input: default@t3 -#### A masked pattern was here #### -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -PREHOOK: query: select * from (select c_int, b, t1.c from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 left semi join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p right outer join t3 on t1.a=key where (b + 1 == 2) and (b > 0 or c_int >= 0)) R where (c + 1 = 2) and (R.b > 0 or c_int >= 0) -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -PREHOOK: Input: default@t2 -PREHOOK: Input: default@t2@dt=2014 -PREHOOK: Input: default@t3 -#### A masked pattern was here #### -POSTHOOK: query: select * from (select c_int, b, t1.c from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 left semi join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p right outer join t3 on t1.a=key where (b + 1 == 2) and (b > 0 or c_int >= 0)) R where (c + 1 = 2) and (R.b > 0 or c_int >= 0) -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -POSTHOOK: Input: default@t2 -POSTHOOK: Input: default@t2@dt=2014 -POSTHOOK: Input: default@t3 -#### A masked pattern was here #### -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -PREHOOK: query: select * from (select c_int, b, t1.c from 
(select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 left semi join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p full outer join t3 on t1.a=key where (b + 1 == 2) and (b > 0 or c_int >= 0)) R where (c + 1 = 2) and (R.b > 0 or c_int >= 0) -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -PREHOOK: Input: default@t2 -PREHOOK: Input: default@t2@dt=2014 -PREHOOK: Input: default@t3 -#### A masked pattern was here #### -POSTHOOK: query: select * from (select c_int, b, t1.c from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 left semi join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p full outer join t3 on t1.a=key where (b + 1 == 2) and (b > 0 or c_int >= 0)) R where (c + 1 = 2) and (R.b > 0 or c_int >= 0) -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -POSTHOOK: Input: default@t2 -POSTHOOK: Input: default@t2@dt=2014 -POSTHOOK: Input: default@t3 -#### A masked pattern was here #### -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -PREHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key having t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by a+b desc, c asc) t1 left semi join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key having t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by q+r/10 desc, p) t2 on t1.a=p left semi join t3 on t1.a=key where (b + 1 >= 0) and (b > 0 or a >= 0) group by a, c having a > 0 and (a >=1 or c >= 1) and (a + c) >= 0 order by c, a -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -PREHOOK: Input: default@t2 -PREHOOK: Input: default@t2@dt=2014 -PREHOOK: Input: default@t3 -#### A masked pattern was here #### -POSTHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key having t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by a+b desc, c asc) t1 left semi join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key having t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by q+r/10 desc, 
p) t2 on t1.a=p left semi join t3 on t1.a=key where (b + 1 >= 0) and (b > 0 or a >= 0) group by a, c having a > 0 and (a >=1 or c >= 1) and (a + c) >= 0 order by c, a -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -POSTHOOK: Input: default@t2 -POSTHOOK: Input: default@t2@dt=2014 -POSTHOOK: Input: default@t3 -#### A masked pattern was here #### - 1 2 - 1 2 -1 2 -1 12 -PREHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key having t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by a+b desc, c asc limit 5) t1 left semi join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key having t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by q+r/10 desc, p limit 5) t2 on t1.a=p left semi join t3 on t1.a=key where (b + 1 >= 0) and (b > 0 or a >= 0) group by a, c having a > 0 and (a >=1 or c >= 1) and (a + c) >= 0 order by c, a -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -PREHOOK: Input: default@t2 -PREHOOK: Input: default@t2@dt=2014 -PREHOOK: Input: default@t3 -#### A masked pattern was here #### -POSTHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key having t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by a+b desc, c asc limit 5) t1 left semi join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key having t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by q+r/10 desc, p limit 5) t2 on t1.a=p left semi join t3 on t1.a=key where (b + 1 >= 0) and (b > 0 or a >= 0) group by a, c having a > 0 and (a >=1 or c >= 1) and (a + c) >= 0 order by c, a -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -POSTHOOK: Input: default@t2 -POSTHOOK: Input: default@t2@dt=2014 -POSTHOOK: Input: default@t3 -#### A masked pattern was here #### - 1 2 - 1 2 -1 2 -1 12 -PREHOOK: query: -- 13. null expr in select list -select null from t3 -PREHOOK: type: QUERY -PREHOOK: Input: default@t3 -#### A masked pattern was here #### -POSTHOOK: query: -- 13. null expr in select list -select null from t3 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t3 -#### A masked pattern was here #### -NULL -NULL -NULL -NULL -NULL -NULL -NULL -NULL -NULL -NULL -NULL -NULL -NULL -NULL -NULL -NULL -NULL -NULL -NULL -NULL -PREHOOK: query: -- 14. unary operator -select key from t1 where c_int = -6 or c_int = +6 -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: -- 14. unary operator -select key from t1 where c_int = -6 or c_int = +6 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -PREHOOK: query: -- 15. 
query referencing only partition columns -select count(t1.dt) from t1 join t2 on t1.dt = t2.dt where t1.dt = '2014' -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -PREHOOK: Input: default@t2 -PREHOOK: Input: default@t2@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: -- 15. query referencing only partition columns -select count(t1.dt) from t1 join t2 on t1.dt = t2.dt where t1.dt = '2014' -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -POSTHOOK: Input: default@t2 -POSTHOOK: Input: default@t2@dt=2014 -#### A masked pattern was here #### -400 -PREHOOK: query: -- 16. SubQueries Not In --- non agg, non corr -select * -from src_cbo -where src_cbo.key not in - ( select key from src_cbo s1 - where s1.key > '2' - ) order by key -PREHOOK: type: QUERY -PREHOOK: Input: default@src_cbo -#### A masked pattern was here #### -POSTHOOK: query: -- 16. SubQueries Not In --- non agg, non corr -select * -from src_cbo -where src_cbo.key not in - ( select key from src_cbo s1 - where s1.key > '2' - ) order by key -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src_cbo -#### A masked pattern was here #### -0 val_0 -0 val_0 -0 val_0 -10 val_10 -100 val_100 -100 val_100 -103 val_103 -103 val_103 -104 val_104 -104 val_104 -105 val_105 -11 val_11 -111 val_111 -113 val_113 -113 val_113 -114 val_114 -116 val_116 -118 val_118 -118 val_118 -119 val_119 -119 val_119 -119 val_119 -12 val_12 -12 val_12 -120 val_120 -120 val_120 -125 val_125 -125 val_125 -126 val_126 -128 val_128 -128 val_128 -128 val_128 -129 val_129 -129 val_129 -131 val_131 -133 val_133 -134 val_134 -134 val_134 -136 val_136 -137 val_137 -137 val_137 -138 val_138 -138 val_138 -138 val_138 -138 val_138 -143 val_143 -145 val_145 -146 val_146 -146 val_146 -149 val_149 -149 val_149 -15 val_15 -15 val_15 -150 val_150 -152 val_152 -152 val_152 -153 val_153 -155 val_155 -156 val_156 -157 val_157 -158 val_158 -160 val_160 -162 val_162 -163 val_163 -164 val_164 -164 val_164 -165 val_165 -165 val_165 -166 val_166 -167 val_167 -167 val_167 -167 val_167 -168 val_168 -169 val_169 -169 val_169 -169 val_169 -169 val_169 -17 val_17 -170 val_170 -172 val_172 -172 val_172 -174 val_174 -174 val_174 -175 val_175 -175 val_175 -176 val_176 -176 val_176 -177 val_177 -178 val_178 -179 val_179 -179 val_179 -18 val_18 -18 val_18 -180 val_180 -181 val_181 -183 val_183 -186 val_186 -187 val_187 -187 val_187 -187 val_187 -189 val_189 -19 val_19 -190 val_190 -191 val_191 -191 val_191 -192 val_192 -193 val_193 -193 val_193 -193 val_193 -194 val_194 -195 val_195 -195 val_195 -196 val_196 -197 val_197 -197 val_197 -199 val_199 -199 val_199 -199 val_199 -2 val_2 -PREHOOK: query: -- non agg, corr -select p_mfgr, b.p_name, p_size -from part b -where b.p_name not in - (select p_name - from (select p_mfgr, p_name, p_size as r from part) a - where r < 10 and b.p_mfgr = a.p_mfgr - ) -PREHOOK: type: QUERY -PREHOOK: Input: default@part -#### A masked pattern was here #### -POSTHOOK: query: -- non agg, corr -select p_mfgr, b.p_name, p_size -from part b -where b.p_name not in - (select p_name - from (select p_mfgr, p_name, p_size as r from part) a - where r < 10 and b.p_mfgr = a.p_mfgr - ) -POSTHOOK: type: QUERY -POSTHOOK: Input: default@part -#### A masked pattern was here #### -Manufacturer#5 almond antique blue firebrick mint 31 -Manufacturer#3 almond antique chartreuse khaki white 17 -Manufacturer#1 almond antique chartreuse lavender yellow 34 -Manufacturer#3 almond antique forest lavender goldenrod 
14
-Manufacturer#4 almond antique gainsboro frosted violet 10
-Manufacturer#3 almond antique metallic orange dim 19
-Manufacturer#3 almond antique olive coral navajo 45
-Manufacturer#2 almond antique violet chocolate turquoise 14
-Manufacturer#4 almond antique violet mint lemon 39
-Manufacturer#2 almond antique violet turquoise frosted 40
-Manufacturer#1 almond aquamarine burnished black steel 28
-Manufacturer#5 almond aquamarine dodger light gainsboro 46
-Manufacturer#4 almond aquamarine floral ivory bisque 27
-Manufacturer#1 almond aquamarine pink moccasin thistle 42
-Manufacturer#2 almond aquamarine rose maroon antique 25
-Manufacturer#2 almond aquamarine sandy cyan gainsboro 18
-Manufacturer#4 almond azure aquamarine papaya violet 12
-Manufacturer#5 almond azure blanched chiffon midnight 23
-PREHOOK: query: -- agg, non corr
-select p_name, p_size
-from
-part where part.p_size not in
- (select avg(p_size)
- from (select p_size from part) a
- where p_size < 10
- ) order by p_name
-PREHOOK: type: QUERY
-PREHOOK: Input: default@part
-#### A masked pattern was here ####
-POSTHOOK: query: -- agg, non corr
-select p_name, p_size
-from
-part where part.p_size not in
- (select avg(p_size)
- from (select p_size from part) a
- where p_size < 10
- ) order by p_name
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@part
-#### A masked pattern was here ####
-almond antique blue firebrick mint 31
-almond antique burnished rose metallic 2
-almond antique burnished rose metallic 2
-almond antique chartreuse khaki white 17
-almond antique chartreuse lavender yellow 34
-almond antique forest lavender goldenrod 14
-almond antique gainsboro frosted violet 10
-almond antique medium spring khaki 6
-almond antique metallic orange dim 19
-almond antique misty red olive 1
-almond antique olive coral navajo 45
-almond antique salmon chartreuse burlywood 6
-almond antique sky peru orange 2
-almond antique violet chocolate turquoise 14
-almond antique violet mint lemon 39
-almond antique violet turquoise frosted 40
-almond aquamarine burnished black steel 28
-almond aquamarine dodger light gainsboro 46
-almond aquamarine floral ivory bisque 27
-almond aquamarine midnight light salmon 2
-almond aquamarine pink moccasin thistle 42
-almond aquamarine rose maroon antique 25
-almond aquamarine sandy cyan gainsboro 18
-almond aquamarine yellow dodger mint 7
-almond azure aquamarine papaya violet 12
-almond azure blanched chiffon midnight 23
-PREHOOK: query: -- agg, corr
-select p_mfgr, p_name, p_size
-from part b where b.p_size not in
- (select min(p_size)
- from (select p_mfgr, p_size from part) a
- where p_size < 10 and b.p_mfgr = a.p_mfgr
- ) order by p_name
-PREHOOK: type: QUERY
-PREHOOK: Input: default@part
-#### A masked pattern was here ####
-POSTHOOK: query: -- agg, corr
-select p_mfgr, p_name, p_size
-from part b where b.p_size not in
- (select min(p_size)
- from (select p_mfgr, p_size from part) a
- where p_size < 10 and b.p_mfgr = a.p_mfgr
- ) order by p_name
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@part
-#### A masked pattern was here ####
-Manufacturer#5 almond antique blue firebrick mint 31
-Manufacturer#3 almond antique chartreuse khaki white 17
-Manufacturer#1 almond antique chartreuse lavender yellow 34
-Manufacturer#3 almond antique forest lavender goldenrod 14
-Manufacturer#4 almond antique gainsboro frosted violet 10
-Manufacturer#5 almond antique medium spring khaki 6
-Manufacturer#3 almond antique metallic orange dim 19
-Manufacturer#3 almond antique olive coral navajo 45
-Manufacturer#1 almond antique salmon chartreuse burlywood 6
-Manufacturer#2 almond antique violet chocolate turquoise 14
-Manufacturer#4 almond antique violet mint lemon 39
-Manufacturer#2 almond antique violet turquoise frosted 40
-Manufacturer#1 almond aquamarine burnished black steel 28
-Manufacturer#5 almond aquamarine dodger light gainsboro 46
-Manufacturer#4 almond aquamarine floral ivory bisque 27
-Manufacturer#1 almond aquamarine pink moccasin thistle 42
-Manufacturer#2 almond aquamarine rose maroon antique 25
-Manufacturer#2 almond aquamarine sandy cyan gainsboro 18
-Manufacturer#4 almond azure aquamarine papaya violet 12
-Manufacturer#5 almond azure blanched chiffon midnight 23
-PREHOOK: query: -- non agg, non corr, Group By in Parent Query
-select li.l_partkey, count(*)
-from lineitem li
-where li.l_linenumber = 1 and
- li.l_orderkey not in (select l_orderkey from lineitem where l_shipmode = 'AIR')
-group by li.l_partkey
-PREHOOK: type: QUERY
-PREHOOK: Input: default@lineitem
-#### A masked pattern was here ####
-POSTHOOK: query: -- non agg, non corr, Group By in Parent Query
-select li.l_partkey, count(*)
-from lineitem li
-where li.l_linenumber = 1 and
- li.l_orderkey not in (select l_orderkey from lineitem where l_shipmode = 'AIR')
-group by li.l_partkey
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@lineitem
-#### A masked pattern was here ####
-450 1
-7068 1
-21636 1
-22630 1
-59694 1
-61931 1
-85951 1
-88035 1
-88362 1
-106170 1
-119477 1
-119767 1
-123076 1
-139636 1
-175839 1
-182052 1
-PREHOOK: query: -- add null check test from sq_notin.q once HIVE-7721 resolved.
-
--- non agg, corr, having
-select b.p_mfgr, min(p_retailprice)
-from part b
-group by b.p_mfgr
-having b.p_mfgr not in
- (select p_mfgr
- from (select p_mfgr, min(p_retailprice) l, max(p_retailprice) r, avg(p_retailprice) a from part group by p_mfgr) a
- where min(p_retailprice) = l and r - l > 600
- )
- order by b.p_mfgr
-PREHOOK: type: QUERY
-PREHOOK: Input: default@part
-#### A masked pattern was here ####
-POSTHOOK: query: -- add null check test from sq_notin.q once HIVE-7721 resolved.
-
--- non agg, corr, having
-select b.p_mfgr, min(p_retailprice)
-from part b
-group by b.p_mfgr
-having b.p_mfgr not in
- (select p_mfgr
- from (select p_mfgr, min(p_retailprice) l, max(p_retailprice) r, avg(p_retailprice) a from part group by p_mfgr) a
- where min(p_retailprice) = l and r - l > 600
- )
- order by b.p_mfgr
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@part
-#### A masked pattern was here ####
-Manufacturer#1 1173.15
-Manufacturer#2 1690.68
-PREHOOK: query: -- agg, non corr, having
-select b.p_mfgr, min(p_retailprice)
-from part b
-group by b.p_mfgr
-having b.p_mfgr not in
- (select p_mfgr
- from part a
- group by p_mfgr
- having max(p_retailprice) - min(p_retailprice) > 600
- )
- order by b.p_mfgr
-PREHOOK: type: QUERY
-PREHOOK: Input: default@part
-#### A masked pattern was here ####
-POSTHOOK: query: -- agg, non corr, having
-select b.p_mfgr, min(p_retailprice)
-from part b
-group by b.p_mfgr
-having b.p_mfgr not in
- (select p_mfgr
- from part a
- group by p_mfgr
- having max(p_retailprice) - min(p_retailprice) > 600
- )
- order by b.p_mfgr
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@part
-#### A masked pattern was here ####
-Manufacturer#1 1173.15
-Manufacturer#2 1690.68
-PREHOOK: query: -- 17. SubQueries In
--- non agg, non corr
-select *
-from src_cbo
-where src_cbo.key in (select key from src_cbo s1 where s1.key > '9')
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src_cbo
-#### A masked pattern was here ####
-POSTHOOK: query: -- 17. SubQueries In
--- non agg, non corr
-select *
-from src_cbo
-where src_cbo.key in (select key from src_cbo s1 where s1.key > '9')
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src_cbo
-#### A masked pattern was here ####
-90 val_90
-90 val_90
-90 val_90
-92 val_92
-95 val_95
-95 val_95
-96 val_96
-97 val_97
-97 val_97
-98 val_98
-98 val_98
-PREHOOK: query: -- agg, corr
--- add back once rank issue fixed for cbo
-
--- distinct, corr
-select *
-from src_cbo b
-where b.key in
- (select distinct a.key
- from src_cbo a
- where b.value = a.value and a.key > '9'
- )
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src_cbo
-#### A masked pattern was here ####
-POSTHOOK: query: -- agg, corr
--- add back once rank issue fixed for cbo
-
--- distinct, corr
-select *
-from src_cbo b
-where b.key in
- (select distinct a.key
- from src_cbo a
- where b.value = a.value and a.key > '9'
- )
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src_cbo
-#### A masked pattern was here ####
-90 val_90
-90 val_90
-90 val_90
-92 val_92
-95 val_95
-95 val_95
-96 val_96
-97 val_97
-97 val_97
-98 val_98
-98 val_98
-PREHOOK: query: -- non agg, corr, with join in Parent Query
-select p.p_partkey, li.l_suppkey
-from (select distinct l_partkey as p_partkey from lineitem) p join lineitem li on p.p_partkey = li.l_partkey
-where li.l_linenumber = 1 and
- li.l_orderkey in (select l_orderkey from lineitem where l_shipmode = 'AIR' and l_linenumber = li.l_linenumber)
-PREHOOK: type: QUERY
-PREHOOK: Input: default@lineitem
-#### A masked pattern was here ####
-POSTHOOK: query: -- non agg, corr, with join in Parent Query
-select p.p_partkey, li.l_suppkey
-from (select distinct l_partkey as p_partkey from lineitem) p join lineitem li on p.p_partkey = li.l_partkey
-where li.l_linenumber = 1 and
- li.l_orderkey in (select l_orderkey from lineitem where l_shipmode = 'AIR' and l_linenumber = li.l_linenumber)
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@lineitem
-#### A masked pattern was here ####
-4297 1798
-108570 8571
-PREHOOK: query: -- where and having
--- Plan is:
--- Stage 1: b semijoin sq1:src_cbo (subquery in where)
--- Stage 2: group by Stage 1 o/p
--- Stage 5: group by on sq2:src_cbo (subquery in having)
--- Stage 6: Stage 2 o/p semijoin Stage 5
-select key, value, count(*)
-from src_cbo b
-where b.key in (select key from src_cbo where src_cbo.key > '8')
-group by key, value
-having count(*) in (select count(*) from src_cbo s1 where s1.key > '9' group by s1.key )
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src_cbo
-#### A masked pattern was here ####
-POSTHOOK: query: -- where and having
--- Plan is:
--- Stage 1: b semijoin sq1:src_cbo (subquery in where)
--- Stage 2: group by Stage 1 o/p
--- Stage 5: group by on sq2:src_cbo (subquery in having)
--- Stage 6: Stage 2 o/p semijoin Stage 5
-select key, value, count(*)
-from src_cbo b
-where b.key in (select key from src_cbo where src_cbo.key > '8')
-group by key, value
-having count(*) in (select count(*) from src_cbo s1 where s1.key > '9' group by s1.key )
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src_cbo
-#### A masked pattern was here ####
-80 val_80 1
-96 val_96 1
-92 val_92 1
-9 val_9 1
-87 val_87 1
-86 val_86 1
-85 val_85 1
-82 val_82 1
-84 val_84 2
-95 val_95 2
-83 val_83 2
-98 val_98 2
-97 val_97 2
-90 val_90 3
-PREHOOK: query: -- non agg, non corr, windowing -select p_mfgr, p_name, avg(p_size) -from part -group by p_mfgr, p_name -having p_name in - (select first_value(p_name) over(partition by p_mfgr order by p_size) from part) -PREHOOK: type: QUERY -PREHOOK: Input: default@part -#### A masked pattern was here #### -POSTHOOK: query: -- non agg, non corr, windowing -select p_mfgr, p_name, avg(p_size) -from part -group by p_mfgr, p_name -having p_name in - (select first_value(p_name) over(partition by p_mfgr order by p_size) from part) -POSTHOOK: type: QUERY -POSTHOOK: Input: default@part -#### A masked pattern was here #### -Manufacturer#1 almond antique burnished rose metallic 2.0 -Manufacturer#3 almond antique misty red olive 1.0 -Manufacturer#5 almond antique sky peru orange 2.0 -Manufacturer#2 almond aquamarine midnight light salmon 2.0 -Manufacturer#4 almond aquamarine yellow dodger mint 7.0 -PREHOOK: query: -- 18. SubQueries Not Exists --- distinct, corr -select * -from src_cbo b -where not exists - (select distinct a.key - from src_cbo a - where b.value = a.value and a.value > 'val_2' - ) -PREHOOK: type: QUERY -PREHOOK: Input: default@src_cbo -#### A masked pattern was here #### -POSTHOOK: query: -- 18. SubQueries Not Exists --- distinct, corr -select * -from src_cbo b -where not exists - (select distinct a.key - from src_cbo a - where b.value = a.value and a.value > 'val_2' - ) -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src_cbo -#### A masked pattern was here #### -0 val_0 -0 val_0 -0 val_0 -10 val_10 -100 val_100 -100 val_100 -103 val_103 -103 val_103 -104 val_104 -104 val_104 -105 val_105 -11 val_11 -111 val_111 -113 val_113 -113 val_113 -114 val_114 -116 val_116 -118 val_118 -118 val_118 -119 val_119 -119 val_119 -119 val_119 -12 val_12 -12 val_12 -120 val_120 -120 val_120 -125 val_125 -125 val_125 -126 val_126 -128 val_128 -128 val_128 -128 val_128 -129 val_129 -129 val_129 -131 val_131 -133 val_133 -134 val_134 -134 val_134 -136 val_136 -137 val_137 -137 val_137 -138 val_138 -138 val_138 -138 val_138 -138 val_138 -143 val_143 -145 val_145 -146 val_146 -146 val_146 -149 val_149 -149 val_149 -15 val_15 -15 val_15 -150 val_150 -152 val_152 -152 val_152 -153 val_153 -155 val_155 -156 val_156 -157 val_157 -158 val_158 -160 val_160 -162 val_162 -163 val_163 -164 val_164 -164 val_164 -165 val_165 -165 val_165 -166 val_166 -167 val_167 -167 val_167 -167 val_167 -168 val_168 -169 val_169 -169 val_169 -169 val_169 -169 val_169 -17 val_17 -170 val_170 -172 val_172 -172 val_172 -174 val_174 -174 val_174 -175 val_175 -175 val_175 -176 val_176 -176 val_176 -177 val_177 -178 val_178 -179 val_179 -179 val_179 -18 val_18 -18 val_18 -180 val_180 -181 val_181 -183 val_183 -186 val_186 -187 val_187 -187 val_187 -187 val_187 -189 val_189 -19 val_19 -190 val_190 -191 val_191 -191 val_191 -192 val_192 -193 val_193 -193 val_193 -193 val_193 -194 val_194 -195 val_195 -195 val_195 -196 val_196 -197 val_197 -197 val_197 -199 val_199 -199 val_199 -199 val_199 -2 val_2 -PREHOOK: query: -- no agg, corr, having -select * -from src_cbo b -group by key, value -having not exists - (select a.key - from src_cbo a - where b.value = a.value and a.key = b.key and a.value > 'val_12' - ) -PREHOOK: type: QUERY -PREHOOK: Input: default@src_cbo -#### A masked pattern was here #### -POSTHOOK: query: -- no agg, corr, having -select * -from src_cbo b -group by key, value -having not exists - (select a.key - from src_cbo a - where b.value = a.value and a.key = b.key and a.value > 'val_12' - ) -POSTHOOK: type: QUERY 
-POSTHOOK: Input: default@src_cbo -#### A masked pattern was here #### -0 val_0 -10 val_10 -100 val_100 -103 val_103 -104 val_104 -105 val_105 -11 val_11 -111 val_111 -113 val_113 -114 val_114 -116 val_116 -118 val_118 -119 val_119 -12 val_12 -PREHOOK: query: -- 19. SubQueries Exists --- view test -create view cv1 as -select * -from src_cbo b -where exists - (select a.key - from src_cbo a - where b.value = a.value and a.key = b.key and a.value > 'val_9') -PREHOOK: type: CREATEVIEW -PREHOOK: Input: default@src_cbo -PREHOOK: Output: database:default -PREHOOK: Output: default@cv1 -POSTHOOK: query: -- 19. SubQueries Exists --- view test -create view cv1 as -select * -from src_cbo b -where exists - (select a.key - from src_cbo a - where b.value = a.value and a.key = b.key and a.value > 'val_9') -POSTHOOK: type: CREATEVIEW -POSTHOOK: Input: default@src_cbo -POSTHOOK: Output: database:default -POSTHOOK: Output: default@cv1 -PREHOOK: query: select * from cv1 -PREHOOK: type: QUERY -PREHOOK: Input: default@cv1 -PREHOOK: Input: default@src_cbo -#### A masked pattern was here #### -POSTHOOK: query: select * from cv1 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@cv1 -POSTHOOK: Input: default@src_cbo -#### A masked pattern was here #### -90 val_90 -90 val_90 -90 val_90 -92 val_92 -95 val_95 -95 val_95 -96 val_96 -97 val_97 -97 val_97 -98 val_98 -98 val_98 -PREHOOK: query: -- sq in from -select * -from (select * - from src_cbo b - where exists - (select a.key - from src_cbo a - where b.value = a.value and a.key = b.key and a.value > 'val_9') - ) a -PREHOOK: type: QUERY -PREHOOK: Input: default@src_cbo -#### A masked pattern was here #### -POSTHOOK: query: -- sq in from -select * -from (select * - from src_cbo b - where exists - (select a.key - from src_cbo a - where b.value = a.value and a.key = b.key and a.value > 'val_9') - ) a -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src_cbo -#### A masked pattern was here #### -90 val_90 -90 val_90 -90 val_90 -92 val_92 -95 val_95 -95 val_95 -96 val_96 -97 val_97 -97 val_97 -98 val_98 -98 val_98 -PREHOOK: query: -- sq in from, having -select * -from (select b.key, count(*) - from src_cbo b - group by b.key - having exists - (select a.key - from src_cbo a - where a.key = b.key and a.value > 'val_9' - ) -) a -PREHOOK: type: QUERY -PREHOOK: Input: default@src_cbo -#### A masked pattern was here #### -POSTHOOK: query: -- sq in from, having -select * -from (select b.key, count(*) - from src_cbo b - group by b.key - having exists - (select a.key - from src_cbo a - where a.key = b.key and a.value > 'val_9' - ) -) a -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src_cbo -#### A masked pattern was here #### -90 3 -92 1 -95 2 -96 1 -97 2 -98 2 -PREHOOK: query: -- 20. Test get stats with empty partition list -select t1.value from t1 join t2 on t1.key = t2.key where t1.dt = '10' and t1.c_boolean = true -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t2 -PREHOOK: Input: default@t2@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: -- 20. Test get stats with empty partition list -select t1.value from t1 join t2 on t1.key = t2.key where t1.dt = '10' and t1.c_boolean = true -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t2 -POSTHOOK: Input: default@t2@dt=2014 -#### A masked pattern was here #### -PREHOOK: query: -- 21. 
Test groupby is empty and there is no other cols in aggr -select unionsrc.key FROM (select 'tst1' as key, count(1) as value from src) unionsrc -PREHOOK: type: QUERY -PREHOOK: Input: default@src -#### A masked pattern was here #### -POSTHOOK: query: -- 21. Test groupby is empty and there is no other cols in aggr -select unionsrc.key FROM (select 'tst1' as key, count(1) as value from src) unionsrc -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src -#### A masked pattern was here #### -tst1 -PREHOOK: query: select unionsrc.key, unionsrc.value FROM (select 'tst1' as key, count(1) as value from src) unionsrc -PREHOOK: type: QUERY -PREHOOK: Input: default@src -#### A masked pattern was here #### -POSTHOOK: query: select unionsrc.key, unionsrc.value FROM (select 'tst1' as key, count(1) as value from src) unionsrc -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src -#### A masked pattern was here #### -tst1 500 -PREHOOK: query: select unionsrc.key FROM (select 'max' as key, max(c_int) as value from t3 s1 - UNION ALL - select 'min' as key, min(c_int) as value from t3 s2 - UNION ALL - select 'avg' as key, avg(c_int) as value from t3 s3) unionsrc order by unionsrc.key -PREHOOK: type: QUERY -PREHOOK: Input: default@t3 -#### A masked pattern was here #### -POSTHOOK: query: select unionsrc.key FROM (select 'max' as key, max(c_int) as value from t3 s1 - UNION ALL - select 'min' as key, min(c_int) as value from t3 s2 - UNION ALL - select 'avg' as key, avg(c_int) as value from t3 s3) unionsrc order by unionsrc.key -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t3 -#### A masked pattern was here #### -avg -max -min -PREHOOK: query: select unionsrc.key, unionsrc.value FROM (select 'max' as key, max(c_int) as value from t3 s1 - UNION ALL - select 'min' as key, min(c_int) as value from t3 s2 - UNION ALL - select 'avg' as key, avg(c_int) as value from t3 s3) unionsrc order by unionsrc.key -PREHOOK: type: QUERY -PREHOOK: Input: default@t3 -#### A masked pattern was here #### -POSTHOOK: query: select unionsrc.key, unionsrc.value FROM (select 'max' as key, max(c_int) as value from t3 s1 - UNION ALL - select 'min' as key, min(c_int) as value from t3 s2 - UNION ALL - select 'avg' as key, avg(c_int) as value from t3 s3) unionsrc order by unionsrc.key -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t3 -#### A masked pattern was here #### -avg 1.5 -max 3.0 -min 1.0 -PREHOOK: query: select unionsrc.key, count(1) FROM (select 'max' as key, max(c_int) as value from t3 s1 - UNION ALL - select 'min' as key, min(c_int) as value from t3 s2 - UNION ALL - select 'avg' as key, avg(c_int) as value from t3 s3) unionsrc group by unionsrc.key order by unionsrc.key -PREHOOK: type: QUERY -PREHOOK: Input: default@t3 -#### A masked pattern was here #### -POSTHOOK: query: select unionsrc.key, count(1) FROM (select 'max' as key, max(c_int) as value from t3 s1 - UNION ALL - select 'min' as key, min(c_int) as value from t3 s2 - UNION ALL - select 'avg' as key, avg(c_int) as value from t3 s3) unionsrc group by unionsrc.key order by unionsrc.key -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t3 -#### A masked pattern was here #### -avg 1 -max 1 -min 1 -PREHOOK: query: -- Windowing -select *, rank() over(partition by key order by value) as rr from src1 -PREHOOK: type: QUERY -PREHOOK: Input: default@src1 -#### A masked pattern was here #### -POSTHOOK: query: -- Windowing -select *, rank() over(partition by key order by value) as rr from src1 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src1 -#### A masked pattern was here #### - 1 - 
1 - 1 - 1 - val_165 5 - val_193 6 - val_265 7 - val_27 8 - val_409 9 - val_484 10 -128 1 -146 val_146 1 -150 val_150 1 -213 val_213 1 -224 1 -238 val_238 1 -255 val_255 1 -273 val_273 1 -278 val_278 1 -311 val_311 1 -369 1 -401 val_401 1 -406 val_406 1 -66 val_66 1 -98 val_98 1 -PREHOOK: query: select *, rank() over(partition by key order by value) from src1 -PREHOOK: type: QUERY -PREHOOK: Input: default@src1 -#### A masked pattern was here #### -POSTHOOK: query: select *, rank() over(partition by key order by value) from src1 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src1 -#### A masked pattern was here #### - 1 - 1 - 1 - 1 - val_165 5 - val_193 6 - val_265 7 - val_27 8 - val_409 9 - val_484 10 -128 1 -146 val_146 1 -150 val_150 1 -213 val_213 1 -224 1 -238 val_238 1 -255 val_255 1 -273 val_273 1 -278 val_278 1 -311 val_311 1 -369 1 -401 val_401 1 -406 val_406 1 -66 val_66 1 -98 val_98 1 Index: ql/src/test/results/clientpositive/cbo_gby.q.out =================================================================== --- ql/src/test/results/clientpositive/cbo_gby.q.out (revision 0) +++ ql/src/test/results/clientpositive/cbo_gby.q.out (working copy) @@ -0,0 +1,132 @@ +PREHOOK: query: -- 6. Test Select + TS + Join + Fil + GB + GB Having +select * from cbo_t1 group by c_int +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: -- 6. Test Select + TS + Join + Fil + GB + GB Having +select * from cbo_t1 group by c_int +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +NULL +1 +PREHOOK: query: select key, (c_int+1)+2 as x, sum(c_int) from cbo_t1 group by c_float, cbo_t1.c_int, key +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select key, (c_int+1)+2 as x, sum(c_int) from cbo_t1 group by c_float, cbo_t1.c_int, key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +null NULL NULL + 1 4 2 + 1 4 2 +1 4 12 +1 4 2 +PREHOOK: query: select * from (select key, (c_int+c_float+1+2) as x, sum(c_int) as y from cbo_t1 group by c_float, cbo_t1.c_int, key) R group by y, x +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select key, (c_int+c_float+1+2) as x, sum(c_int) as y from cbo_t1 group by c_float, cbo_t1.c_int, key) R group by y, x +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +NULL NULL +2 5.0 +12 5.0 +PREHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0) group by c_float, cbo_t1.c_int, key order by a) cbo_t1 join (select key as p, c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0) group by c_float, cbo_t2.c_int, key order by q/10 desc, r asc) cbo_t2 on cbo_t1.a=p join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q >= 0) and (b > 0 or c_int >= 0) group by cbo_t3.c_int, c order by cbo_t3.c_int+c desc, c +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +PREHOOK: Input: default@cbo_t2 +PREHOOK: Input: default@cbo_t2@dt=2014 +PREHOOK: 
Input: default@cbo_t3 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0) group by c_float, cbo_t1.c_int, key order by a) cbo_t1 join (select key as p, c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0) group by c_float, cbo_t2.c_int, key order by q/10 desc, r asc) cbo_t2 on cbo_t1.a=p join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q >= 0) and (b > 0 or c_int >= 0) group by cbo_t3.c_int, c order by cbo_t3.c_int+c desc, c +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +POSTHOOK: Input: default@cbo_t2 +POSTHOOK: Input: default@cbo_t2@dt=2014 +POSTHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### +1 12 +1 2 +PREHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0) group by c_float, cbo_t1.c_int, key having cbo_t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by b % c asc, b desc) cbo_t1 left outer join (select key as p, c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0) group by c_float, cbo_t2.c_int, key having cbo_t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0) cbo_t2 on cbo_t1.a=p left outer join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q >= 0) and (b > 0 or c_int >= 0) group by cbo_t3.c_int, c having cbo_t3.c_int > 0 and (c_int >=1 or c >= 1) and (c_int + c) >= 0 order by cbo_t3.c_int % c asc, cbo_t3.c_int desc +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +PREHOOK: Input: default@cbo_t2 +PREHOOK: Input: default@cbo_t2@dt=2014 +PREHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0) group by c_float, cbo_t1.c_int, key having cbo_t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by b % c asc, b desc) cbo_t1 left outer join (select key as p, c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0) group by c_float, cbo_t2.c_int, key having cbo_t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0) cbo_t2 on cbo_t1.a=p left outer join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q >= 0) and (b > 0 or c_int >= 0) group by cbo_t3.c_int, c having cbo_t3.c_int > 0 and (c_int >=1 or c >= 1) and (c_int + c) >= 0 order by cbo_t3.c_int % c asc, cbo_t3.c_int desc +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +POSTHOOK: Input: default@cbo_t2 +POSTHOOK: Input: default@cbo_t2@dt=2014 +POSTHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### +1 12 +1 2 +PREHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0) group by c_float, cbo_t1.c_int, key having cbo_t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by b+c, a desc) cbo_t1 right outer join (select key as p, c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or 
cbo_t2.c_float >= 0) group by c_float, cbo_t2.c_int, key having cbo_t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0) cbo_t2 on cbo_t1.a=p right outer join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q >= 2) and (b > 0 or c_int >= 0) group by cbo_t3.c_int, c +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +PREHOOK: Input: default@cbo_t2 +PREHOOK: Input: default@cbo_t2@dt=2014 +PREHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0) group by c_float, cbo_t1.c_int, key having cbo_t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by b+c, a desc) cbo_t1 right outer join (select key as p, c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0) group by c_float, cbo_t2.c_int, key having cbo_t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0) cbo_t2 on cbo_t1.a=p right outer join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q >= 2) and (b > 0 or c_int >= 0) group by cbo_t3.c_int, c +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +POSTHOOK: Input: default@cbo_t2 +POSTHOOK: Input: default@cbo_t2@dt=2014 +POSTHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### +1 2 +1 12 +PREHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0) group by c_float, cbo_t1.c_int, key having cbo_t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by c+a desc) cbo_t1 full outer join (select key as p, c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0) group by c_float, cbo_t2.c_int, key having cbo_t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by p+q desc, r asc) cbo_t2 on cbo_t1.a=p full outer join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q >= 0) and (b > 0 or c_int >= 0) group by cbo_t3.c_int, c having cbo_t3.c_int > 0 and (c_int >=1 or c >= 1) and (c_int + c) >= 0 order by cbo_t3.c_int +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +PREHOOK: Input: default@cbo_t2 +PREHOOK: Input: default@cbo_t2@dt=2014 +PREHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0) group by c_float, cbo_t1.c_int, key having cbo_t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by c+a desc) cbo_t1 full outer join (select key as p, c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0) group by c_float, cbo_t2.c_int, key having cbo_t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by p+q desc, r asc) cbo_t2 on cbo_t1.a=p full outer join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q >= 0) and (b > 0 or c_int >= 0) group by cbo_t3.c_int, c having cbo_t3.c_int > 0 and (c_int >=1 or c >= 1) and (c_int + c) >= 0 order by cbo_t3.c_int +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +POSTHOOK: Input: default@cbo_t2 
+POSTHOOK: Input: default@cbo_t2@dt=2014 +POSTHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### +1 12 +1 2 +PREHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0) group by c_float, cbo_t1.c_int, key having cbo_t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0) cbo_t1 join (select key as p, c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0) group by c_float, cbo_t2.c_int, key having cbo_t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0) cbo_t2 on cbo_t1.a=p join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q >= 0) and (b > 0 or c_int >= 0) group by cbo_t3.c_int, c +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +PREHOOK: Input: default@cbo_t2 +PREHOOK: Input: default@cbo_t2@dt=2014 +PREHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0) group by c_float, cbo_t1.c_int, key having cbo_t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0) cbo_t1 join (select key as p, c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0) group by c_float, cbo_t2.c_int, key having cbo_t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0) cbo_t2 on cbo_t1.a=p join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q >= 0) and (b > 0 or c_int >= 0) group by cbo_t3.c_int, c +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +POSTHOOK: Input: default@cbo_t2 +POSTHOOK: Input: default@cbo_t2@dt=2014 +POSTHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### +1 2 +1 12 Index: ql/src/test/results/clientpositive/cbo_gby_empty.q.out =================================================================== --- ql/src/test/results/clientpositive/cbo_gby_empty.q.out (revision 0) +++ ql/src/test/results/clientpositive/cbo_gby_empty.q.out (working copy) @@ -0,0 +1,77 @@ +PREHOOK: query: -- 21. Test groupby is empty and there is no other cols in aggr +select unionsrc.key FROM (select 'tst1' as key, count(1) as value from src) unionsrc +PREHOOK: type: QUERY +PREHOOK: Input: default@src +#### A masked pattern was here #### +POSTHOOK: query: -- 21. 
Test groupby is empty and there is no other cols in aggr +select unionsrc.key FROM (select 'tst1' as key, count(1) as value from src) unionsrc +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +#### A masked pattern was here #### +tst1 +PREHOOK: query: select unionsrc.key, unionsrc.value FROM (select 'tst1' as key, count(1) as value from src) unionsrc +PREHOOK: type: QUERY +PREHOOK: Input: default@src +#### A masked pattern was here #### +POSTHOOK: query: select unionsrc.key, unionsrc.value FROM (select 'tst1' as key, count(1) as value from src) unionsrc +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +#### A masked pattern was here #### +tst1 500 +PREHOOK: query: select unionsrc.key FROM (select 'max' as key, max(c_int) as value from cbo_t3 s1 + UNION ALL + select 'min' as key, min(c_int) as value from cbo_t3 s2 + UNION ALL + select 'avg' as key, avg(c_int) as value from cbo_t3 s3) unionsrc order by unionsrc.key +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### +POSTHOOK: query: select unionsrc.key FROM (select 'max' as key, max(c_int) as value from cbo_t3 s1 + UNION ALL + select 'min' as key, min(c_int) as value from cbo_t3 s2 + UNION ALL + select 'avg' as key, avg(c_int) as value from cbo_t3 s3) unionsrc order by unionsrc.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### +avg +max +min +PREHOOK: query: select unionsrc.key, unionsrc.value FROM (select 'max' as key, max(c_int) as value from cbo_t3 s1 + UNION ALL + select 'min' as key, min(c_int) as value from cbo_t3 s2 + UNION ALL + select 'avg' as key, avg(c_int) as value from cbo_t3 s3) unionsrc order by unionsrc.key +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### +POSTHOOK: query: select unionsrc.key, unionsrc.value FROM (select 'max' as key, max(c_int) as value from cbo_t3 s1 + UNION ALL + select 'min' as key, min(c_int) as value from cbo_t3 s2 + UNION ALL + select 'avg' as key, avg(c_int) as value from cbo_t3 s3) unionsrc order by unionsrc.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### +avg 1.5 +max 3.0 +min 1.0 +PREHOOK: query: select unionsrc.key, count(1) FROM (select 'max' as key, max(c_int) as value from cbo_t3 s1 + UNION ALL + select 'min' as key, min(c_int) as value from cbo_t3 s2 + UNION ALL + select 'avg' as key, avg(c_int) as value from cbo_t3 s3) unionsrc group by unionsrc.key order by unionsrc.key +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### +POSTHOOK: query: select unionsrc.key, count(1) FROM (select 'max' as key, max(c_int) as value from cbo_t3 s1 + UNION ALL + select 'min' as key, min(c_int) as value from cbo_t3 s2 + UNION ALL + select 'avg' as key, avg(c_int) as value from cbo_t3 s3) unionsrc group by unionsrc.key order by unionsrc.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### +avg 1 +max 1 +min 1 Index: ql/src/test/results/clientpositive/cbo_join.q.out =================================================================== --- ql/src/test/results/clientpositive/cbo_join.q.out (revision 0) +++ ql/src/test/results/clientpositive/cbo_join.q.out (working copy) @@ -0,0 +1,15118 @@ +PREHOOK: query: -- 4. 
Test Select + Join + TS +select cbo_t1.c_int, cbo_t2.c_int from cbo_t1 join cbo_t2 on cbo_t1.key=cbo_t2.key +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +PREHOOK: Input: default@cbo_t2 +PREHOOK: Input: default@cbo_t2@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: -- 4. Test Select + Join + TS +select cbo_t1.c_int, cbo_t2.c_int from cbo_t1 join cbo_t2 on cbo_t1.key=cbo_t2.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +POSTHOOK: Input: default@cbo_t2 +POSTHOOK: Input: default@cbo_t2@dt=2014 +#### A masked pattern was here #### +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +NULL NULL +NULL NULL +NULL NULL +NULL NULL +PREHOOK: query: select cbo_t1.key from cbo_t1 join cbo_t3 +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +PREHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### +POSTHOOK: query: select cbo_t1.key from cbo_t1 join cbo_t3 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +POSTHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +PREHOOK: query: select cbo_t1.key from cbo_t1 join cbo_t3 where cbo_t1.key=cbo_t3.key and cbo_t1.key >= 1 +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +PREHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### +POSTHOOK: query: select cbo_t1.key from cbo_t1 join cbo_t3 where cbo_t1.key=cbo_t3.key and cbo_t1.key >= 1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +POSTHOOK: Input: default@cbo_t3 +#### A masked pattern 
was here #### + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +PREHOOK: query: select cbo_t1.c_int, cbo_t2.c_int from cbo_t1 left outer join cbo_t2 on cbo_t1.key=cbo_t2.key +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +PREHOOK: Input: default@cbo_t2 +PREHOOK: Input: default@cbo_t2@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select cbo_t1.c_int, cbo_t2.c_int from cbo_t1 left outer join cbo_t2 on cbo_t1.key=cbo_t2.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +POSTHOOK: Input: default@cbo_t2 +POSTHOOK: Input: default@cbo_t2@dt=2014 +#### A masked pattern was here #### +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +NULL NULL +NULL NULL +NULL NULL +NULL NULL +PREHOOK: query: select cbo_t1.c_int, cbo_t2.c_int from cbo_t1 right outer join cbo_t2 on cbo_t1.key=cbo_t2.key +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +PREHOOK: Input: default@cbo_t2 +PREHOOK: Input: default@cbo_t2@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select cbo_t1.c_int, cbo_t2.c_int from cbo_t1 right outer join cbo_t2 on cbo_t1.key=cbo_t2.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +POSTHOOK: Input: default@cbo_t2 +POSTHOOK: Input: default@cbo_t2@dt=2014 +#### A masked pattern was here #### +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +NULL 2 +NULL 2 +NULL 2 +NULL 2 +NULL 2 +NULL NULL +NULL NULL +NULL NULL +NULL NULL +PREHOOK: query: select cbo_t1.c_int, cbo_t2.c_int from cbo_t1 full outer join cbo_t2 on cbo_t1.key=cbo_t2.key +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +PREHOOK: Input: default@cbo_t2 +PREHOOK: Input: default@cbo_t2@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select cbo_t1.c_int, cbo_t2.c_int from cbo_t1 full outer join cbo_t2 on cbo_t1.key=cbo_t2.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +POSTHOOK: Input: default@cbo_t2 +POSTHOOK: Input: default@cbo_t2@dt=2014 +#### A masked pattern was here #### +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 
1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +NULL 2 +NULL 2 +NULL 2 +NULL 2 +NULL 2 +NULL NULL +NULL NULL +NULL NULL +NULL NULL +PREHOOK: query: select b, cbo_t1.c, cbo_t2.p, q, cbo_t3.c_int from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1) cbo_t1 join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2) cbo_t2 on cbo_t1.a=p join cbo_t3 on cbo_t1.a=key +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +PREHOOK: Input: default@cbo_t2 +PREHOOK: Input: default@cbo_t2@dt=2014 +PREHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### +POSTHOOK: query: select b, cbo_t1.c, cbo_t2.p, q, cbo_t3.c_int from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1) cbo_t1 join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2) cbo_t2 on cbo_t1.a=p join cbo_t3 on cbo_t1.a=key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +POSTHOOK: Input: default@cbo_t2 +POSTHOOK: Input: default@cbo_t2@dt=2014 +POSTHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 
1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 
1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +NULL NULL null NULL NULL +NULL NULL null NULL NULL +NULL NULL null NULL NULL +NULL NULL null NULL NULL +NULL NULL null NULL NULL +NULL NULL null NULL NULL +NULL NULL null NULL NULL +NULL NULL null NULL NULL +PREHOOK: query: select key, cbo_t1.c_int, cbo_t2.p, q from cbo_t1 join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2) cbo_t2 on cbo_t1.key=p join (select key as a, c_int as b, cbo_t3.c_float as c from cbo_t3)cbo_t3 on cbo_t1.key=a +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +PREHOOK: Input: default@cbo_t2 +PREHOOK: Input: default@cbo_t2@dt=2014 +PREHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### +POSTHOOK: query: select key, cbo_t1.c_int, cbo_t2.p, q from cbo_t1 join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2) cbo_t2 on cbo_t1.key=p join (select key as a, c_int as b, cbo_t3.c_float as c from cbo_t3)cbo_t3 on cbo_t1.key=a +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +POSTHOOK: Input: default@cbo_t2 +POSTHOOK: Input: default@cbo_t2@dt=2014 +POSTHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 
1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +null NULL null NULL +null NULL null NULL +null NULL null NULL +null NULL null NULL +null NULL null NULL +null 
NULL null NULL
+null NULL null NULL
+null NULL null NULL
+PREHOOK: query: select a, cbo_t1.b, key, cbo_t2.c_int, cbo_t3.p from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1) cbo_t1 join cbo_t2 on cbo_t1.a=key join (select key as p, c_int as q, cbo_t3.c_float as r from cbo_t3)cbo_t3 on cbo_t1.a=cbo_t3.p
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+PREHOOK: Input: default@cbo_t2
+PREHOOK: Input: default@cbo_t2@dt=2014
+PREHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+POSTHOOK: query: select a, cbo_t1.b, key, cbo_t2.c_int, cbo_t3.p from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1) cbo_t1 join cbo_t2 on cbo_t1.a=key join (select key as p, c_int as q, cbo_t3.c_float as r from cbo_t3)cbo_t3 on cbo_t1.a=cbo_t3.p
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+POSTHOOK: Input: default@cbo_t2
+POSTHOOK: Input: default@cbo_t2@dt=2014
+POSTHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+ 1 1 1 1 1
[... identical rows elided ...]
+1 1 1 1 1
[... identical rows elided ...]
+null NULL null NULL null
+null NULL null NULL null
+null NULL null NULL null
+null NULL null NULL null
+null NULL null NULL null
+null NULL null NULL null
+null NULL null NULL null
+null NULL null NULL null
+PREHOOK: query: select b, cbo_t1.c, cbo_t2.c_int, cbo_t3.c_int from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1) cbo_t1 join cbo_t2 on cbo_t1.a=cbo_t2.key join cbo_t3 on cbo_t1.a=cbo_t3.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+PREHOOK: Input: default@cbo_t2
+PREHOOK: Input: default@cbo_t2@dt=2014
+PREHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+POSTHOOK: query: select b, cbo_t1.c, cbo_t2.c_int, cbo_t3.c_int from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1) cbo_t1 join cbo_t2 on cbo_t1.a=cbo_t2.key join cbo_t3 on cbo_t1.a=cbo_t3.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+POSTHOOK: Input: default@cbo_t2
+POSTHOOK: Input: default@cbo_t2@dt=2014
+POSTHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+1 1.0 1 1
[... identical rows elided ...]
+NULL NULL NULL NULL
+NULL NULL NULL NULL
+NULL NULL NULL NULL
+NULL NULL NULL NULL
+NULL NULL NULL NULL
+NULL NULL NULL NULL
+NULL NULL NULL NULL
+NULL NULL NULL NULL
+PREHOOK: query: select cbo_t3.c_int, b, cbo_t2.c_int, cbo_t1.c from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1) cbo_t1 join cbo_t2 on cbo_t1.a=cbo_t2.key join cbo_t3 on cbo_t1.a=cbo_t3.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+PREHOOK: Input: default@cbo_t2
+PREHOOK: Input: default@cbo_t2@dt=2014
+PREHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+POSTHOOK: query: select cbo_t3.c_int, b, cbo_t2.c_int, cbo_t1.c from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1) cbo_t1 join cbo_t2 on cbo_t1.a=cbo_t2.key join cbo_t3 on cbo_t1.a=cbo_t3.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+POSTHOOK: Input: default@cbo_t2
+POSTHOOK: Input: default@cbo_t2@dt=2014
+POSTHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+1 1 1 1.0
[... identical rows elided ...]
+NULL NULL NULL NULL
+NULL NULL NULL NULL
+NULL NULL NULL NULL
+NULL NULL NULL NULL
+NULL NULL NULL NULL
+NULL NULL NULL NULL
+NULL NULL NULL NULL
+NULL NULL NULL NULL
+PREHOOK: query: select b, cbo_t1.c, cbo_t2.p, q, cbo_t3.c_int from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1) cbo_t1 left outer join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2) cbo_t2 on cbo_t1.a=p join cbo_t3 on cbo_t1.a=key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+PREHOOK: Input: default@cbo_t2
+PREHOOK: Input: default@cbo_t2@dt=2014
+PREHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+POSTHOOK: query: select b, cbo_t1.c, cbo_t2.p, q, cbo_t3.c_int from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1) cbo_t1 left outer join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2) cbo_t2 on cbo_t1.a=p join cbo_t3 on cbo_t1.a=key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+POSTHOOK: Input: default@cbo_t2
+POSTHOOK: Input: default@cbo_t2@dt=2014
+POSTHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+1 1.0 1 1 1
[... identical rows elided ...]
+NULL NULL null NULL NULL
+NULL NULL null NULL NULL
+NULL NULL null NULL NULL
+NULL NULL null NULL NULL
+NULL NULL null NULL NULL
+NULL NULL null NULL NULL
+NULL NULL null NULL NULL
+NULL NULL null NULL NULL
+PREHOOK: query: select key, cbo_t1.c_int, cbo_t2.p, q from cbo_t1 join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2) cbo_t2 on cbo_t1.key=p left outer join (select key as a, c_int as b, cbo_t3.c_float as c from cbo_t3)cbo_t3 on cbo_t1.key=a
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+PREHOOK: Input: default@cbo_t2
+PREHOOK: Input: default@cbo_t2@dt=2014
+PREHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+POSTHOOK: query: select key, cbo_t1.c_int, cbo_t2.p, q from cbo_t1 join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2) cbo_t2 on cbo_t1.key=p left outer join (select key as a, c_int as b, cbo_t3.c_float as c from cbo_t3)cbo_t3 on cbo_t1.key=a
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+POSTHOOK: Input: default@cbo_t2
+POSTHOOK: Input: default@cbo_t2@dt=2014
+POSTHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+ 1 1 1 1
[... identical rows elided ...]
+1 1 1 1
[... identical rows elided ...]
+null NULL null NULL
+null NULL null NULL
+null NULL null NULL
+null NULL null NULL
+null NULL null NULL
+null NULL null NULL
+null NULL null NULL
+null NULL null NULL
+PREHOOK: query: select b, cbo_t1.c, cbo_t2.p, q, cbo_t3.c_int from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1) cbo_t1 right outer join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2) cbo_t2 on cbo_t1.a=p join cbo_t3 on cbo_t1.a=key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+PREHOOK: Input: default@cbo_t2
+PREHOOK: Input: default@cbo_t2@dt=2014
+PREHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+POSTHOOK: query: select b, cbo_t1.c, cbo_t2.p, q, cbo_t3.c_int from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1) cbo_t1 right outer join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2) cbo_t2 on cbo_t1.a=p join cbo_t3 on cbo_t1.a=key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+POSTHOOK: Input: default@cbo_t2
+POSTHOOK: Input: default@cbo_t2@dt=2014
+POSTHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+1 1.0 1 1 1
[... identical rows elided ...]
+NULL NULL null NULL NULL
+NULL NULL null NULL NULL
+NULL NULL null NULL NULL
+NULL NULL null NULL NULL
+NULL NULL null NULL NULL
+NULL NULL null NULL NULL
+NULL NULL null NULL NULL
+NULL NULL null NULL NULL
+PREHOOK: query: select key, cbo_t1.c_int, cbo_t2.p, q from cbo_t1 join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2) cbo_t2 on cbo_t1.key=p right outer join (select key as a, c_int as b, cbo_t3.c_float as c from cbo_t3)cbo_t3 on cbo_t1.key=a
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+PREHOOK: Input: default@cbo_t2
+PREHOOK: Input: default@cbo_t2@dt=2014
+PREHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+POSTHOOK: query: select key, cbo_t1.c_int, cbo_t2.p, q from cbo_t1 join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2) cbo_t2 on cbo_t1.key=p right outer join (select key as a, c_int as b, cbo_t3.c_float as c from cbo_t3)cbo_t3 on cbo_t1.key=a
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+POSTHOOK: Input: default@cbo_t2
+POSTHOOK: Input: default@cbo_t2@dt=2014
+POSTHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+ 1 1 1 1
[... identical rows elided ...]
+1 1 1 1
[... identical rows elided ...]
+NULL NULL NULL NULL
+NULL NULL NULL NULL
+NULL NULL NULL NULL
+NULL NULL NULL NULL
+NULL NULL NULL NULL
+NULL NULL NULL NULL
+null NULL null NULL
+null NULL null NULL
+null NULL null NULL
+null NULL null NULL
+null NULL null NULL
+null NULL null NULL
+null NULL null NULL
+null NULL null NULL
+PREHOOK: query: select b, cbo_t1.c, cbo_t2.p, q, cbo_t3.c_int from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1) cbo_t1 full outer join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2) cbo_t2 on cbo_t1.a=p join cbo_t3 on cbo_t1.a=key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+PREHOOK: Input: default@cbo_t2
+PREHOOK: Input: default@cbo_t2@dt=2014
+PREHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+POSTHOOK: query: select b, cbo_t1.c, cbo_t2.p, q, cbo_t3.c_int from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1) cbo_t1 full outer join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2) cbo_t2 on cbo_t1.a=p join cbo_t3 on cbo_t1.a=key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+POSTHOOK: Input: default@cbo_t2
+POSTHOOK: Input: default@cbo_t2@dt=2014
+POSTHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+1 1.0 1 1 1
[... identical rows elided ...]
+NULL NULL null NULL NULL
+NULL NULL null NULL NULL
+NULL NULL null NULL NULL
+NULL NULL null NULL NULL
+NULL NULL null NULL NULL
+NULL NULL null NULL NULL
+NULL NULL null NULL NULL
+NULL NULL null NULL NULL
+PREHOOK: query: select key, cbo_t1.c_int, cbo_t2.p, q from cbo_t1 join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2) cbo_t2 on cbo_t1.key=p full outer join (select key as a, c_int as b, cbo_t3.c_float as c from cbo_t3)cbo_t3 on cbo_t1.key=a
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+PREHOOK: Input: default@cbo_t2
+PREHOOK: Input: default@cbo_t2@dt=2014
+PREHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+POSTHOOK: query: select key, cbo_t1.c_int, cbo_t2.p, q from cbo_t1 join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2) cbo_t2 on cbo_t1.key=p full outer join (select key as a, c_int as b, cbo_t3.c_float as c from cbo_t3)cbo_t3 on cbo_t1.key=a
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+POSTHOOK: Input: default@cbo_t2
+POSTHOOK: Input: default@cbo_t2@dt=2014
+POSTHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+ 1 1 1 1
[... identical rows elided ...]
+1 1 1 1
[... identical rows elided ...]
+NULL NULL NULL NULL
+NULL NULL NULL NULL
+NULL NULL NULL NULL
+NULL NULL NULL NULL
+NULL NULL NULL NULL
+NULL NULL NULL NULL
+null NULL null NULL
+null NULL null NULL
+null NULL null NULL
+null NULL null NULL
+null NULL null NULL
+null NULL null NULL
+null NULL null NULL
+null NULL null NULL
+PREHOOK: query: -- 5. Test Select + Join + FIL + TS
+select cbo_t1.c_int, cbo_t2.c_int from cbo_t1 join cbo_t2 on cbo_t1.key=cbo_t2.key where (cbo_t1.c_int + cbo_t2.c_int == 2) and (cbo_t1.c_int > 0 or cbo_t2.c_float >= 0)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+PREHOOK: Input: default@cbo_t2
+PREHOOK: Input: default@cbo_t2@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: -- 5. Test Select + Join + FIL + TS
+select cbo_t1.c_int, cbo_t2.c_int from cbo_t1 join cbo_t2 on cbo_t1.key=cbo_t2.key where (cbo_t1.c_int + cbo_t2.c_int == 2) and (cbo_t1.c_int > 0 or cbo_t2.c_float >= 0)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+POSTHOOK: Input: default@cbo_t2
+POSTHOOK: Input: default@cbo_t2@dt=2014
+#### A masked pattern was here ####
+1 1
[... identical rows elided ...]
+PREHOOK: query: select cbo_t1.c_int, cbo_t2.c_int from cbo_t1 left outer join cbo_t2 on cbo_t1.key=cbo_t2.key where (cbo_t1.c_int + cbo_t2.c_int == 2) and (cbo_t1.c_int > 0 or cbo_t2.c_float >= 0)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+PREHOOK: Input: default@cbo_t2
+PREHOOK: Input: default@cbo_t2@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: select cbo_t1.c_int, cbo_t2.c_int from cbo_t1 left outer join cbo_t2 on cbo_t1.key=cbo_t2.key where (cbo_t1.c_int + cbo_t2.c_int == 2) and (cbo_t1.c_int > 0 or cbo_t2.c_float >= 0)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+POSTHOOK: Input: default@cbo_t2
+POSTHOOK: Input: default@cbo_t2@dt=2014
+#### A masked pattern was here ####
+1 1
[... identical rows elided ...]
+PREHOOK: query: select cbo_t1.c_int, cbo_t2.c_int from cbo_t1 right outer join cbo_t2 on cbo_t1.key=cbo_t2.key where (cbo_t1.c_int + cbo_t2.c_int == 2) and (cbo_t1.c_int > 0 or cbo_t2.c_float >= 0)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+PREHOOK: Input: default@cbo_t2
+PREHOOK: Input: default@cbo_t2@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: select cbo_t1.c_int, cbo_t2.c_int from cbo_t1 right outer join cbo_t2 on cbo_t1.key=cbo_t2.key where (cbo_t1.c_int + cbo_t2.c_int == 2) and (cbo_t1.c_int > 0 or cbo_t2.c_float >= 0)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+POSTHOOK: Input: default@cbo_t2
+POSTHOOK: Input: default@cbo_t2@dt=2014
+#### A masked pattern was here ####
+1 1
[... identical rows elided ...]
+PREHOOK: query: select cbo_t1.c_int, cbo_t2.c_int from cbo_t1 full outer join cbo_t2 on cbo_t1.key=cbo_t2.key where (cbo_t1.c_int + cbo_t2.c_int == 2) and (cbo_t1.c_int > 0 or cbo_t2.c_float >= 0)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+PREHOOK: Input: default@cbo_t2
+PREHOOK: Input: default@cbo_t2@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: select cbo_t1.c_int, cbo_t2.c_int from cbo_t1 full outer join cbo_t2 on cbo_t1.key=cbo_t2.key where (cbo_t1.c_int + cbo_t2.c_int == 2) and (cbo_t1.c_int > 0 or cbo_t2.c_float >= 0)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+POSTHOOK: Input: default@cbo_t2
+POSTHOOK: Input: default@cbo_t2@dt=2014
+#### A masked pattern was here ####
+1 1
[... identical rows elided ...]
+PREHOOK: query: select b, cbo_t1.c, cbo_t2.p, q, cbo_t3.c_int from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1 where (cbo_t1.c_int + 1 == 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)) cbo_t1 join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2 where (cbo_t2.c_int + 1 == 2) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)) cbo_t2 on cbo_t1.a=p join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q == 2) and (b > 0 or cbo_t2.q >= 0)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+PREHOOK: Input: default@cbo_t2
+PREHOOK: Input: default@cbo_t2@dt=2014
+PREHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+POSTHOOK: query: select b, cbo_t1.c, cbo_t2.p, q, cbo_t3.c_int from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1 where (cbo_t1.c_int + 1 == 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)) cbo_t1 join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2 where (cbo_t2.c_int + 1 == 2) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)) cbo_t2 on cbo_t1.a=p join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q == 2) and (b > 0 or cbo_t2.q >= 0)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+POSTHOOK: Input: default@cbo_t2
+POSTHOOK: Input: default@cbo_t2@dt=2014
+POSTHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+1 1.0 1 1 1
[... identical rows elided; output continues beyond this excerpt ...]
+1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 
1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +PREHOOK: query: select q, b, cbo_t2.p, cbo_t1.c, cbo_t3.c_int from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1 where (cbo_t1.c_int + 1 == 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)) cbo_t1 left outer join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2 where (cbo_t2.c_int + 1 == 2) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)) cbo_t2 on cbo_t1.a=p join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q == 2) and (b > 0 or c_int >= 0) +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +PREHOOK: Input: default@cbo_t2 +PREHOOK: Input: default@cbo_t2@dt=2014 +PREHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### +POSTHOOK: query: select q, b, cbo_t2.p, cbo_t1.c, cbo_t3.c_int from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1 where (cbo_t1.c_int + 1 == 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)) cbo_t1 left outer join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2 where (cbo_t2.c_int + 1 == 2) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)) cbo_t2 on cbo_t1.a=p join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q == 2) and (b > 0 or 
c_int >= 0) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +POSTHOOK: Input: default@cbo_t2 +POSTHOOK: Input: default@cbo_t2@dt=2014 +POSTHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 
+1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 
1 1.0 1 +PREHOOK: query: select q, b, cbo_t2.p, cbo_t1.c, cbo_t3.c_int from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1 where (cbo_t1.c_int + 1 == 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)) cbo_t1 right outer join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2 where (cbo_t2.c_int + 1 == 2) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)) cbo_t2 on cbo_t1.a=p join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q == 2) and (b > 0 or c_int >= 0) +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +PREHOOK: Input: default@cbo_t2 +PREHOOK: Input: default@cbo_t2@dt=2014 +PREHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### +POSTHOOK: query: select q, b, cbo_t2.p, cbo_t1.c, cbo_t3.c_int from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1 where (cbo_t1.c_int + 1 == 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)) cbo_t1 right outer join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2 where (cbo_t2.c_int + 1 == 2) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)) cbo_t2 on cbo_t1.a=p join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q == 2) and (b > 0 or c_int >= 0) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +POSTHOOK: Input: default@cbo_t2 +POSTHOOK: Input: default@cbo_t2@dt=2014 +POSTHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 
1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 
+1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +PREHOOK: query: select q, b, cbo_t2.p, cbo_t1.c, cbo_t3.c_int from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1 where (cbo_t1.c_int + 1 == 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)) cbo_t1 full outer join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2 where (cbo_t2.c_int + 1 == 2) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)) cbo_t2 on cbo_t1.a=p join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q == 2) and (b > 0 or c_int >= 0) +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +PREHOOK: Input: default@cbo_t2 +PREHOOK: Input: default@cbo_t2@dt=2014 +PREHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### +POSTHOOK: query: select q, b, cbo_t2.p, cbo_t1.c, cbo_t3.c_int from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1 where (cbo_t1.c_int + 1 == 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)) cbo_t1 full outer join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2 where (cbo_t2.c_int + 1 == 2) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)) cbo_t2 on cbo_t1.a=p join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q == 2) and (b > 0 or c_int >= 0) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +POSTHOOK: Input: default@cbo_t2 +POSTHOOK: Input: default@cbo_t2@dt=2014 +POSTHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 
1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 
1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +PREHOOK: query: select * from (select q, b, cbo_t2.p, cbo_t1.c, cbo_t3.c_int from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1 where (cbo_t1.c_int + 1 == 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)) cbo_t1 full outer join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2 where (cbo_t2.c_int + 1 == 2) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)) cbo_t2 on cbo_t1.a=p join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0) +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +PREHOOK: Input: default@cbo_t2 +PREHOOK: Input: default@cbo_t2@dt=2014 +PREHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select q, b, cbo_t2.p, cbo_t1.c, cbo_t3.c_int from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1 where (cbo_t1.c_int + 1 == 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)) cbo_t1 full outer join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2 where (cbo_t2.c_int + 1 == 2) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)) 
cbo_t2 on cbo_t1.a=p join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +POSTHOOK: Input: default@cbo_t2 +POSTHOOK: Input: default@cbo_t2@dt=2014 +POSTHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 
1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 
1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +PREHOOK: query: select * from (select q, b, cbo_t2.p, cbo_t1.c, cbo_t3.c_int from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1 where (cbo_t1.c_int + 1 == 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)) cbo_t1 left outer join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2 where (cbo_t2.c_int + 1 == 2) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)) cbo_t2 on cbo_t1.a=p left outer join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0) +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +PREHOOK: Input: default@cbo_t2 +PREHOOK: Input: default@cbo_t2@dt=2014 +PREHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select q, b, cbo_t2.p, cbo_t1.c, cbo_t3.c_int from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1 where (cbo_t1.c_int + 1 == 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)) cbo_t1 left outer join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2 where (cbo_t2.c_int + 1 == 2) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)) cbo_t2 on cbo_t1.a=p left outer join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +POSTHOOK: Input: default@cbo_t2 +POSTHOOK: Input: default@cbo_t2@dt=2014 +POSTHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 
1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 
+1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +PREHOOK: query: select * from (select q, b, cbo_t2.p, cbo_t1.c, cbo_t3.c_int from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1 where (cbo_t1.c_int + 1 == 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)) cbo_t1 left outer join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2 where (cbo_t2.c_int + 1 == 2) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)) cbo_t2 on cbo_t1.a=p right outer join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0) +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +PREHOOK: Input: default@cbo_t2 +PREHOOK: Input: default@cbo_t2@dt=2014 +PREHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select q, b, cbo_t2.p, cbo_t1.c, cbo_t3.c_int from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1 where (cbo_t1.c_int + 1 == 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)) cbo_t1 left outer join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2 where (cbo_t2.c_int + 1 == 2) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)) cbo_t2 on cbo_t1.a=p right outer join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +POSTHOOK: Input: default@cbo_t2 +POSTHOOK: Input: default@cbo_t2@dt=2014 +POSTHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 
1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 
+1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +PREHOOK: query: select * from (select q, b, cbo_t2.p, cbo_t1.c, cbo_t3.c_int from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1 where (cbo_t1.c_int + 1 == 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)) cbo_t1 left outer join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2 where (cbo_t2.c_int + 1 == 2) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)) cbo_t2 on cbo_t1.a=p full outer join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0) +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +PREHOOK: Input: default@cbo_t2 +PREHOOK: Input: default@cbo_t2@dt=2014 +PREHOOK: 
Input: default@cbo_t3 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select q, b, cbo_t2.p, cbo_t1.c, cbo_t3.c_int from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1 where (cbo_t1.c_int + 1 == 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)) cbo_t1 left outer join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2 where (cbo_t2.c_int + 1 == 2) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)) cbo_t2 on cbo_t1.a=p full outer join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +POSTHOOK: Input: default@cbo_t2 +POSTHOOK: Input: default@cbo_t2@dt=2014 +POSTHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 
1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 
1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +PREHOOK: query: select * from (select q, b, cbo_t2.p, cbo_t1.c, cbo_t3.c_int from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1 where (cbo_t1.c_int + 1 == 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)) cbo_t1 right outer join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2 where (cbo_t2.c_int + 1 == 2) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)) cbo_t2 on cbo_t1.a=p right outer join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0) +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +PREHOOK: Input: default@cbo_t2 +PREHOOK: Input: default@cbo_t2@dt=2014 +PREHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select q, b, cbo_t2.p, cbo_t1.c, cbo_t3.c_int from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1 where (cbo_t1.c_int + 1 == 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)) cbo_t1 right outer join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2 where (cbo_t2.c_int + 1 == 2) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)) cbo_t2 on cbo_t1.a=p right outer join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +POSTHOOK: Input: default@cbo_t2 +POSTHOOK: Input: default@cbo_t2@dt=2014 +POSTHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 
1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 
+1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +PREHOOK: query: select * from (select q, b, cbo_t2.p, cbo_t1.c, cbo_t3.c_int from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1 where (cbo_t1.c_int + 1 == 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)) cbo_t1 right outer join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2 where (cbo_t2.c_int + 1 == 2) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)) cbo_t2 on cbo_t1.a=p left outer join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0) +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +PREHOOK: Input: default@cbo_t2 +PREHOOK: Input: default@cbo_t2@dt=2014 +PREHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select q, b, cbo_t2.p, cbo_t1.c, cbo_t3.c_int from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1 where (cbo_t1.c_int + 1 == 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)) cbo_t1 right outer join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2 where (cbo_t2.c_int + 1 == 2) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)) cbo_t2 on cbo_t1.a=p left outer join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +POSTHOOK: Input: default@cbo_t2 +POSTHOOK: Input: default@cbo_t2@dt=2014 +POSTHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### +1 1 1 1.0 1 +1 1 1 
1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 
+1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +PREHOOK: query: select * from (select q, b, cbo_t2.p, cbo_t1.c, cbo_t3.c_int from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1 where (cbo_t1.c_int + 1 == 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)) cbo_t1 right outer join (select cbo_t2.key 
as p, cbo_t2.c_int as q, c_float as r from cbo_t2 where (cbo_t2.c_int + 1 == 2) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)) cbo_t2 on cbo_t1.a=p full outer join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0) +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +PREHOOK: Input: default@cbo_t2 +PREHOOK: Input: default@cbo_t2@dt=2014 +PREHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select q, b, cbo_t2.p, cbo_t1.c, cbo_t3.c_int from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1 where (cbo_t1.c_int + 1 == 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)) cbo_t1 right outer join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2 where (cbo_t2.c_int + 1 == 2) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)) cbo_t2 on cbo_t1.a=p full outer join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +POSTHOOK: Input: default@cbo_t2 +POSTHOOK: Input: default@cbo_t2@dt=2014 +POSTHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 
+1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 
1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +PREHOOK: query: select * from (select q, b, cbo_t2.p, cbo_t1.c, cbo_t3.c_int from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1 where (cbo_t1.c_int + 1 == 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)) cbo_t1 full outer join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2 where (cbo_t2.c_int + 1 == 2) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)) cbo_t2 on cbo_t1.a=p full outer join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0) +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +PREHOOK: Input: default@cbo_t2 +PREHOOK: Input: default@cbo_t2@dt=2014 +PREHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select q, b, cbo_t2.p, cbo_t1.c, cbo_t3.c_int from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1 where (cbo_t1.c_int + 1 == 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)) cbo_t1 full outer join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2 where (cbo_t2.c_int + 1 == 2) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)) cbo_t2 on cbo_t1.a=p full outer join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +POSTHOOK: Input: default@cbo_t2 +POSTHOOK: Input: default@cbo_t2@dt=2014 +POSTHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 
1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 
1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +PREHOOK: query: select * from (select q, b, cbo_t2.p, cbo_t1.c, cbo_t3.c_int from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1 where (cbo_t1.c_int + 1 == 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)) cbo_t1 full outer join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2 where (cbo_t2.c_int + 1 == 2) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)) cbo_t2 on cbo_t1.a=p left outer join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0) +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +PREHOOK: Input: default@cbo_t2 +PREHOOK: Input: default@cbo_t2@dt=2014 +PREHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select q, b, cbo_t2.p, cbo_t1.c, cbo_t3.c_int from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1 where (cbo_t1.c_int + 1 == 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)) cbo_t1 full outer join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2 where (cbo_t2.c_int + 1 == 2) and 
(cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)) cbo_t2 on cbo_t1.a=p left outer join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +POSTHOOK: Input: default@cbo_t2 +POSTHOOK: Input: default@cbo_t2@dt=2014 +POSTHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 
1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 
1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +PREHOOK: query: select * from (select q, b, cbo_t2.p, cbo_t1.c, cbo_t3.c_int from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1 where (cbo_t1.c_int + 1 == 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)) cbo_t1 full outer join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2 where (cbo_t2.c_int + 1 == 2) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)) cbo_t2 on cbo_t1.a=p right outer join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0) +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +PREHOOK: Input: default@cbo_t2 +PREHOOK: Input: default@cbo_t2@dt=2014 +PREHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select q, b, cbo_t2.p, cbo_t1.c, cbo_t3.c_int from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1 where (cbo_t1.c_int + 1 == 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)) cbo_t1 full outer join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2 where (cbo_t2.c_int + 1 == 2) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)) cbo_t2 on cbo_t1.a=p right outer join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +POSTHOOK: Input: default@cbo_t2 +POSTHOOK: Input: default@cbo_t2@dt=2014 +POSTHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 
1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 
1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 Index: ql/src/test/results/clientpositive/cbo_limit.q.out =================================================================== --- ql/src/test/results/clientpositive/cbo_limit.q.out (revision 0) +++ ql/src/test/results/clientpositive/cbo_limit.q.out (working copy) @@ -0,0 +1,101 @@ +PREHOOK: query: -- 7. Test Select + TS + Join + Fil + GB + GB Having + Limit +select * from cbo_t1 group by c_int limit 1 +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: -- 7. 
Test Select + TS + Join + Fil + GB + GB Having + Limit +select * from cbo_t1 group by c_int limit 1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +NULL +PREHOOK: query: select key, (c_int+1)+2 as x, sum(c_int) from cbo_t1 group by c_float, cbo_t1.c_int, key order by x limit 1 +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select key, (c_int+1)+2 as x, sum(c_int) from cbo_t1 group by c_float, cbo_t1.c_int, key order by x limit 1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +null NULL NULL +PREHOOK: query: select * from (select key, (c_int+c_float+1+2) as x, sum(c_int) as y from cbo_t1 group by c_float, cbo_t1.c_int, key) R group by y, x order by x,y limit 1 +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select key, (c_int+c_float+1+2) as x, sum(c_int) as y from cbo_t1 group by c_float, cbo_t1.c_int, key) R group by y, x order by x,y limit 1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +NULL NULL +PREHOOK: query: select key from(select key from (select key from cbo_t1 limit 5)cbo_t2 limit 5)cbo_t3 limit 5 +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select key from(select key from (select key from cbo_t1 limit 5)cbo_t2 limit 5)cbo_t3 limit 5 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +1 +1 +1 +1 +1 +PREHOOK: query: select key, c_int from(select key, c_int from (select key, c_int from cbo_t1 order by c_int limit 5)cbo_t1 order by c_int limit 5)cbo_t2 order by c_int limit 5 +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select key, c_int from(select key, c_int from (select key, c_int from cbo_t1 order by c_int limit 5)cbo_t1 order by c_int limit 5)cbo_t2 order by c_int limit 5 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +null NULL +null NULL +1 1 +1 1 +1 1 +PREHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0) group by c_float, cbo_t1.c_int, key order by a limit 5) cbo_t1 join (select key as p, c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0) group by c_float, cbo_t2.c_int, key order by q/10 desc, r asc limit 5) cbo_t2 on cbo_t1.a=p join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q >= 0) and (b > 0 or c_int >= 0) group by cbo_t3.c_int, c order by cbo_t3.c_int+c desc, c limit 5 +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +PREHOOK: Input: default@cbo_t2 +PREHOOK: Input: default@cbo_t2@dt=2014 +PREHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 or 
cbo_t1.c_float >= 0) group by c_float, cbo_t1.c_int, key order by a limit 5) cbo_t1 join (select key as p, c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0) group by c_float, cbo_t2.c_int, key order by q/10 desc, r asc limit 5) cbo_t2 on cbo_t1.a=p join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q >= 0) and (b > 0 or c_int >= 0) group by cbo_t3.c_int, c order by cbo_t3.c_int+c desc, c limit 5 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +POSTHOOK: Input: default@cbo_t2 +POSTHOOK: Input: default@cbo_t2@dt=2014 +POSTHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### +1 12 +1 2 +PREHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0) group by c_float, cbo_t1.c_int, key having cbo_t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by b % c asc, b desc limit 5) cbo_t1 left outer join (select key as p, c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0) group by c_float, cbo_t2.c_int, key having cbo_t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 limit 5) cbo_t2 on cbo_t1.a=p left outer join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q >= 0) and (b > 0 or c_int >= 0) group by cbo_t3.c_int, c having cbo_t3.c_int > 0 and (c_int >=1 or c >= 1) and (c_int + c) >= 0 order by cbo_t3.c_int % c asc, cbo_t3.c_int desc limit 5 +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +PREHOOK: Input: default@cbo_t2 +PREHOOK: Input: default@cbo_t2@dt=2014 +PREHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0) group by c_float, cbo_t1.c_int, key having cbo_t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by b % c asc, b desc limit 5) cbo_t1 left outer join (select key as p, c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0) group by c_float, cbo_t2.c_int, key having cbo_t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 limit 5) cbo_t2 on cbo_t1.a=p left outer join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q >= 0) and (b > 0 or c_int >= 0) group by cbo_t3.c_int, c having cbo_t3.c_int > 0 and (c_int >=1 or c >= 1) and (c_int + c) >= 0 order by cbo_t3.c_int % c asc, cbo_t3.c_int desc limit 5 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +POSTHOOK: Input: default@cbo_t2 +POSTHOOK: Input: default@cbo_t2@dt=2014 +POSTHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### +1 12 +1 2 Index: ql/src/test/results/clientpositive/cbo_semijoin.q.out =================================================================== --- ql/src/test/results/clientpositive/cbo_semijoin.q.out (revision 0) +++ ql/src/test/results/clientpositive/cbo_semijoin.q.out (working copy) @@ -0,0 +1,442 @@ +PREHOOK: query: -- 12. 
SemiJoin +select cbo_t1.c_int from cbo_t1 left semi join cbo_t2 on cbo_t1.key=cbo_t2.key +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +PREHOOK: Input: default@cbo_t2 +PREHOOK: Input: default@cbo_t2@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: -- 12. SemiJoin +select cbo_t1.c_int from cbo_t1 left semi join cbo_t2 on cbo_t1.key=cbo_t2.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +POSTHOOK: Input: default@cbo_t2 +POSTHOOK: Input: default@cbo_t2@dt=2014 +#### A masked pattern was here #### +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +NULL +NULL +PREHOOK: query: select cbo_t1.c_int from cbo_t1 left semi join cbo_t2 on cbo_t1.key=cbo_t2.key where (cbo_t1.c_int + 1 == 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0) +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +PREHOOK: Input: default@cbo_t2 +PREHOOK: Input: default@cbo_t2@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select cbo_t1.c_int from cbo_t1 left semi join cbo_t2 on cbo_t1.key=cbo_t2.key where (cbo_t1.c_int + 1 == 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +POSTHOOK: Input: default@cbo_t2 +POSTHOOK: Input: default@cbo_t2@dt=2014 +#### A masked pattern was here #### +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +PREHOOK: query: select * from (select c, b, a from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1 where (cbo_t1.c_int + 1 == 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)) cbo_t1 left semi join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2 where (cbo_t2.c_int + 1 == 2) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)) cbo_t2 on cbo_t1.a=p left semi join cbo_t3 on cbo_t1.a=key where (b + 1 == 2) and (b > 0 or c >= 0)) R where (b + 1 = 2) and (R.b > 0 or c >= 0) +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +PREHOOK: Input: default@cbo_t2 +PREHOOK: Input: default@cbo_t2@dt=2014 +PREHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select c, b, a from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1 where (cbo_t1.c_int + 1 == 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)) cbo_t1 left semi join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2 where (cbo_t2.c_int + 1 == 2) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)) cbo_t2 on cbo_t1.a=p left semi join cbo_t3 on cbo_t1.a=key where (b + 1 == 2) and (b > 0 or c >= 0)) R where (b + 1 = 2) and (R.b > 0 or c >= 0) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +POSTHOOK: Input: default@cbo_t2 +POSTHOOK: Input: default@cbo_t2@dt=2014 +POSTHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### +1.0 1 1 +1.0 1 1 +1.0 1 1 +1.0 1 1 +1.0 1 1 +1.0 1 1 +1.0 1 1 +1.0 1 1 +1.0 1 1 +1.0 1 1 +1.0 1 1 +1.0 1 1 +1.0 1 1 +1.0 1 1 +1.0 1 1 +1.0 1 1 +1.0 1 1 +1.0 1 1 +PREHOOK: query: select * from (select cbo_t3.c_int, cbo_t1.c, b from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1 where (cbo_t1.c_int + 1 = 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)) cbo_t1 left semi join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2 where (cbo_t2.c_int + 1 == 2) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)) cbo_t2 on cbo_t1.a=p left outer join 
cbo_t3 on cbo_t1.a=key where (b + cbo_t3.c_int == 2) and (b > 0 or c_int >= 0)) R where (R.c_int + 1 = 2) and (R.b > 0 or c_int >= 0) +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +PREHOOK: Input: default@cbo_t2 +PREHOOK: Input: default@cbo_t2@dt=2014 +PREHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select cbo_t3.c_int, cbo_t1.c, b from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1 where (cbo_t1.c_int + 1 = 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)) cbo_t1 left semi join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2 where (cbo_t2.c_int + 1 == 2) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)) cbo_t2 on cbo_t1.a=p left outer join cbo_t3 on cbo_t1.a=key where (b + cbo_t3.c_int == 2) and (b > 0 or c_int >= 0)) R where (R.c_int + 1 = 2) and (R.b > 0 or c_int >= 0) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +POSTHOOK: Input: default@cbo_t2 +POSTHOOK: Input: default@cbo_t2@dt=2014 +POSTHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +PREHOOK: query: select * from (select c_int, b, cbo_t1.c from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1 where (cbo_t1.c_int + 1 == 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)) cbo_t1 left semi join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2 where (cbo_t2.c_int + 1 == 2) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)) cbo_t2 on cbo_t1.a=p right outer join cbo_t3 on cbo_t1.a=key where (b + 1 == 2) and (b > 0 or c_int >= 0)) R where (c + 1 = 2) and (R.b > 0 or c_int >= 0) +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +PREHOOK: Input: default@cbo_t2 +PREHOOK: Input: default@cbo_t2@dt=2014 +PREHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select c_int, b, cbo_t1.c from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1 where (cbo_t1.c_int + 1 == 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)) cbo_t1 left semi join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2 where (cbo_t2.c_int + 1 == 2) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)) cbo_t2 on cbo_t1.a=p right outer join cbo_t3 on cbo_t1.a=key where (b + 1 == 2) and (b > 0 or c_int >= 0)) R where (c + 1 = 2) and (R.b > 0 or c_int >= 0) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +POSTHOOK: Input: default@cbo_t2 +POSTHOOK: Input: default@cbo_t2@dt=2014 +POSTHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 
1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +PREHOOK: query: select * from (select c_int, b, cbo_t1.c from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1 where (cbo_t1.c_int + 1 == 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)) cbo_t1 left semi join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2 where (cbo_t2.c_int + 1 == 2) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)) cbo_t2 on cbo_t1.a=p full outer join cbo_t3 on cbo_t1.a=key where (b + 1 == 2) and (b > 0 or c_int >= 0)) R where (c + 1 = 2) and (R.b > 0 or c_int >= 0) +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +PREHOOK: Input: default@cbo_t2 +PREHOOK: Input: default@cbo_t2@dt=2014 +PREHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select c_int, b, cbo_t1.c from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1 where (cbo_t1.c_int + 1 == 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)) cbo_t1 left semi join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2 where (cbo_t2.c_int + 1 == 2) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)) cbo_t2 on cbo_t1.a=p full outer join cbo_t3 on cbo_t1.a=key where (b + 1 == 2) and (b > 0 or c_int >= 0)) R where (c + 1 = 2) and (R.b > 0 or c_int >= 0) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +POSTHOOK: Input: default@cbo_t2 +POSTHOOK: Input: default@cbo_t2@dt=2014 +POSTHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +PREHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0) group by c_float, cbo_t1.c_int, key having cbo_t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by a+b desc, c asc) cbo_t1 left semi join (select key as p, c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0) group by c_float, cbo_t2.c_int, key having cbo_t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by q+r/10 desc, p) cbo_t2 on 
cbo_t1.a=p left semi join cbo_t3 on cbo_t1.a=key where (b + 1 >= 0) and (b > 0 or a >= 0) group by a, c having a > 0 and (a >=1 or c >= 1) and (a + c) >= 0 order by c, a +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +PREHOOK: Input: default@cbo_t2 +PREHOOK: Input: default@cbo_t2@dt=2014 +PREHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0) group by c_float, cbo_t1.c_int, key having cbo_t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by a+b desc, c asc) cbo_t1 left semi join (select key as p, c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0) group by c_float, cbo_t2.c_int, key having cbo_t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by q+r/10 desc, p) cbo_t2 on cbo_t1.a=p left semi join cbo_t3 on cbo_t1.a=key where (b + 1 >= 0) and (b > 0 or a >= 0) group by a, c having a > 0 and (a >=1 or c >= 1) and (a + c) >= 0 order by c, a +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +POSTHOOK: Input: default@cbo_t2 +POSTHOOK: Input: default@cbo_t2@dt=2014 +POSTHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### + 1 2 + 1 2 +1 2 +1 12 +PREHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0) group by c_float, cbo_t1.c_int, key having cbo_t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by a+b desc, c asc limit 5) cbo_t1 left semi join (select key as p, c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0) group by c_float, cbo_t2.c_int, key having cbo_t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by q+r/10 desc, p limit 5) cbo_t2 on cbo_t1.a=p left semi join cbo_t3 on cbo_t1.a=key where (b + 1 >= 0) and (b > 0 or a >= 0) group by a, c having a > 0 and (a >=1 or c >= 1) and (a + c) >= 0 order by c, a +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +PREHOOK: Input: default@cbo_t2 +PREHOOK: Input: default@cbo_t2@dt=2014 +PREHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0) group by c_float, cbo_t1.c_int, key having cbo_t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by a+b desc, c asc limit 5) cbo_t1 left semi join (select key as p, c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0) group by c_float, cbo_t2.c_int, key having cbo_t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by q+r/10 desc, p limit 5) cbo_t2 on cbo_t1.a=p left semi join cbo_t3 on cbo_t1.a=key where (b + 1 >= 0) and (b > 0 or a >= 0) group by a, c having a > 0 and (a >=1 or c >= 1) and (a + c) >= 0 order by c, a +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +POSTHOOK: Input: default@cbo_t2 +POSTHOOK: Input: default@cbo_t2@dt=2014 +POSTHOOK: Input: default@cbo_t3 +#### A masked 
pattern was here #### + 1 2 + 1 2 +1 2 +1 12 Index: ql/src/test/results/clientpositive/cbo_simple_select.q.out =================================================================== --- ql/src/test/results/clientpositive/cbo_simple_select.q.out (revision 0) +++ ql/src/test/results/clientpositive/cbo_simple_select.q.out (working copy) @@ -0,0 +1,745 @@ +PREHOOK: query: -- 1. Test Select + TS +select * from cbo_t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: -- 1. Test Select + TS +select * from cbo_t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 false 2014 +1 1 1 1.0 false 2014 +null null NULL NULL NULL 2014 +null null NULL NULL NULL 2014 +PREHOOK: query: select * from cbo_t1 as cbo_t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select * from cbo_t1 as cbo_t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 false 2014 +1 1 1 1.0 false 2014 +null null NULL NULL NULL 2014 +null null NULL NULL NULL 2014 +PREHOOK: query: select * from cbo_t1 as cbo_t2 +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select * from cbo_t1 as cbo_t2 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 false 2014 +1 1 1 1.0 false 2014 +null null NULL NULL NULL 2014 +null null NULL NULL NULL 2014 +PREHOOK: query: select cbo_t1.key as x, c_int as c_int, (((c_int+c_float)*10)+5) as y from cbo_t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select cbo_t1.key as x, c_int as c_int, (((c_int+c_float)*10)+5) as y from cbo_t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 + 1 1 25.0 + 1 1 25.0 + 1 1 25.0 + 1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +null NULL NULL +null NULL NULL +PREHOOK: query: -- 2. 
Test Select + TS + FIL +select * from cbo_t1 where cbo_t1.c_int >= 0 +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: -- 2. Test Select + TS + FIL +select * from cbo_t1 where cbo_t1.c_int >= 0 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 false 2014 +1 1 1 1.0 false 2014 +PREHOOK: query: select * from cbo_t1 as cbo_t1 where cbo_t1.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100 +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select * from cbo_t1 as cbo_t1 where cbo_t1.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 false 2014 +1 1 1 1.0 false 2014 +PREHOOK: query: select * from cbo_t1 as cbo_t2 where cbo_t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100 +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select * from cbo_t1 as cbo_t2 where cbo_t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 false 2014 +1 1 1 1.0 false 2014 +PREHOOK: query: select cbo_t2.key as x, c_int as c_int, (((c_int+c_float)*10)+5) as y from cbo_t1 as cbo_t2 where cbo_t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100 +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select cbo_t2.key as x, c_int as c_int, (((c_int+c_float)*10)+5) as y from cbo_t1 as cbo_t2 where cbo_t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 + 1 1 25.0 + 1 1 25.0 + 1 1 25.0 + 1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +PREHOOK: query: -- 3 Test Select + Select + TS + FIL +select * from (select * from cbo_t1 where cbo_t1.c_int >= 0) as cbo_t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: 
Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: -- 3 Test Select + Select + TS + FIL +select * from (select * from cbo_t1 where cbo_t1.c_int >= 0) as cbo_t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 false 2014 +1 1 1 1.0 false 2014 +PREHOOK: query: select * from (select * from cbo_t1 as cbo_t1 where cbo_t1.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as cbo_t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select * from cbo_t1 as cbo_t1 where cbo_t1.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as cbo_t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 false 2014 +1 1 1 1.0 false 2014 +PREHOOK: query: select * from (select * from cbo_t1 as cbo_t2 where cbo_t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as cbo_t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select * from cbo_t1 as cbo_t2 where cbo_t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as cbo_t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 false 2014 +1 1 1 1.0 false 2014 +PREHOOK: query: select * from (select cbo_t2.key as x, c_int as c_int, (((c_int+c_float)*10)+5) as y from cbo_t1 as cbo_t2 where cbo_t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as cbo_t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select cbo_t2.key as x, c_int as c_int, (((c_int+c_float)*10)+5) as y from cbo_t1 as cbo_t2 where cbo_t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as cbo_t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 + 1 1 25.0 + 1 1 25.0 + 1 1 25.0 + 1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +PREHOOK: query: select * from (select * from cbo_t1 where cbo_t1.c_int >= 0) as cbo_t1 where cbo_t1.c_int >= 0 +PREHOOK: type: QUERY 
+PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select * from cbo_t1 where cbo_t1.c_int >= 0) as cbo_t1 where cbo_t1.c_int >= 0 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 false 2014 +1 1 1 1.0 false 2014 +PREHOOK: query: select * from (select * from cbo_t1 as cbo_t1 where cbo_t1.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as cbo_t1 where cbo_t1.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100 +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select * from cbo_t1 as cbo_t1 where cbo_t1.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as cbo_t1 where cbo_t1.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 false 2014 +1 1 1 1.0 false 2014 +PREHOOK: query: select * from (select * from cbo_t1 as cbo_t2 where cbo_t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as cbo_t2 where cbo_t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100 +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select * from cbo_t1 as cbo_t2 where cbo_t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as cbo_t2 where cbo_t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 false 2014 +1 1 1 1.0 false 2014 +PREHOOK: query: select * from (select cbo_t2.key as x, c_int as c_int, (((c_int+c_float)*10)+5) as y from cbo_t1 as cbo_t2 where cbo_t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as cbo_t1 where cbo_t1.c_int >= 0 and y+c_int >= 0 or x <= 100 +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select cbo_t2.key as x, c_int as c_int, (((c_int+c_float)*10)+5) as y from cbo_t1 as cbo_t2 where cbo_t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as cbo_t1 where cbo_t1.c_int >= 0 and y+c_int >= 0 or x <= 100 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 
+POSTHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 + 1 1 25.0 + 1 1 25.0 + 1 1 25.0 + 1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +PREHOOK: query: select cbo_t1.c_int+c_float as x , c_int as c_int, (((c_int+c_float)*10)+5) as y from (select * from cbo_t1 where cbo_t1.c_int >= 0) as cbo_t1 where cbo_t1.c_int >= 0 +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select cbo_t1.c_int+c_float as x , c_int as c_int, (((c_int+c_float)*10)+5) as y from (select * from cbo_t1 where cbo_t1.c_int >= 0) as cbo_t1 where cbo_t1.c_int >= 0 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +PREHOOK: query: select cbo_t2.c_int+c_float as x , c_int as c_int, (((c_int+c_float)*10)+5) as y from (select * from cbo_t1 where cbo_t1.c_int >= 0) as cbo_t2 where cbo_t2.c_int >= 0 +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select cbo_t2.c_int+c_float as x , c_int as c_int, (((c_int+c_float)*10)+5) as y from (select * from cbo_t1 where cbo_t1.c_int >= 0) as cbo_t2 where cbo_t2.c_int >= 0 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +PREHOOK: query: select * from (select * from cbo_t1 where cbo_t1.c_int >= 0) as cbo_t1 where cbo_t1.c_int >= 0 +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select * from cbo_t1 where cbo_t1.c_int >= 0) as cbo_t1 where cbo_t1.c_int >= 0 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 false 2014 +1 1 1 1.0 false 2014 +PREHOOK: query: select * from (select * from cbo_t1 as cbo_t1 where cbo_t1.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as cbo_t1 where cbo_t1.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100 +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select * from cbo_t1 as cbo_t1 where cbo_t1.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as cbo_t1 where cbo_t1.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 
1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 false 2014 +1 1 1 1.0 false 2014 +PREHOOK: query: select * from (select * from cbo_t1 as cbo_t2 where cbo_t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as cbo_t2 where cbo_t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100 +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select * from cbo_t1 as cbo_t2 where cbo_t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as cbo_t2 where cbo_t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 false 2014 +1 1 1 1.0 false 2014 +PREHOOK: query: select * from (select cbo_t2.key as x, c_int as c_int, (((c_int+c_float)*10)+5) as y from cbo_t1 as cbo_t2 where cbo_t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as cbo_t1 where cbo_t1.c_int >= 0 and y+c_int >= 0 or x <= 100 +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select cbo_t2.key as x, c_int as c_int, (((c_int+c_float)*10)+5) as y from cbo_t1 as cbo_t2 where cbo_t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as cbo_t1 where cbo_t1.c_int >= 0 and y+c_int >= 0 or x <= 100 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 + 1 1 25.0 + 1 1 25.0 + 1 1 25.0 + 1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +PREHOOK: query: select cbo_t1.c_int+c_float as x , c_int as c_int, (((c_int+c_float)*10)+5) as y from (select * from cbo_t1 where cbo_t1.c_int >= 0) as cbo_t1 where cbo_t1.c_int >= 0 +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select cbo_t1.c_int+c_float as x , c_int as c_int, (((c_int+c_float)*10)+5) as y from (select * from cbo_t1 where cbo_t1.c_int >= 0) as cbo_t1 where cbo_t1.c_int >= 0 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +PREHOOK: query: select cbo_t2.c_int+c_float as x , c_int as c_int, (((c_int+c_float)*10)+5) as y from (select * from cbo_t1 where cbo_t1.c_int >= 0) as cbo_t2 where cbo_t2.c_int >= 0 +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select 
cbo_t2.c_int+c_float as x , c_int as c_int, (((c_int+c_float)*10)+5) as y from (select * from cbo_t1 where cbo_t1.c_int >= 0) as cbo_t2 where cbo_t2.c_int >= 0 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +PREHOOK: query: -- 13. null expr in select list +select null from cbo_t3 +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### +POSTHOOK: query: -- 13. null expr in select list +select null from cbo_t3 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +PREHOOK: query: -- 14. unary operator +select key from cbo_t1 where c_int = -6 or c_int = +6 +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: -- 14. unary operator +select key from cbo_t1 where c_int = -6 or c_int = +6 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +PREHOOK: query: -- 15. query referencing only partition columns +select count(cbo_t1.dt) from cbo_t1 join cbo_t2 on cbo_t1.dt = cbo_t2.dt where cbo_t1.dt = '2014' +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +PREHOOK: Input: default@cbo_t2 +PREHOOK: Input: default@cbo_t2@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: -- 15. query referencing only partition columns +select count(cbo_t1.dt) from cbo_t1 join cbo_t2 on cbo_t1.dt = cbo_t2.dt where cbo_t1.dt = '2014' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +POSTHOOK: Input: default@cbo_t2 +POSTHOOK: Input: default@cbo_t2@dt=2014 +#### A masked pattern was here #### +400 Index: ql/src/test/results/clientpositive/cbo_stats.q.out =================================================================== --- ql/src/test/results/clientpositive/cbo_stats.q.out (revision 0) +++ ql/src/test/results/clientpositive/cbo_stats.q.out (working copy) @@ -0,0 +1,14 @@ +PREHOOK: query: -- 20. Test get stats with empty partition list +select cbo_t1.value from cbo_t1 join cbo_t2 on cbo_t1.key = cbo_t2.key where cbo_t1.dt = '10' and cbo_t1.c_boolean = true +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t2 +PREHOOK: Input: default@cbo_t2@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: -- 20. Test get stats with empty partition list +select cbo_t1.value from cbo_t1 join cbo_t2 on cbo_t1.key = cbo_t2.key where cbo_t1.dt = '10' and cbo_t1.c_boolean = true +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t2 +POSTHOOK: Input: default@cbo_t2@dt=2014 +#### A masked pattern was here #### Index: ql/src/test/results/clientpositive/cbo_subq_exists.q.out =================================================================== --- ql/src/test/results/clientpositive/cbo_subq_exists.q.out (revision 0) +++ ql/src/test/results/clientpositive/cbo_subq_exists.q.out (working copy) @@ -0,0 +1,297 @@ +PREHOOK: query: -- 18. 
SubQueries Not Exists +-- distinct, corr +select * +from src_cbo b +where not exists + (select distinct a.key + from src_cbo a + where b.value = a.value and a.value > 'val_2' + ) +PREHOOK: type: QUERY +PREHOOK: Input: default@src_cbo +#### A masked pattern was here #### +POSTHOOK: query: -- 18. SubQueries Not Exists +-- distinct, corr +select * +from src_cbo b +where not exists + (select distinct a.key + from src_cbo a + where b.value = a.value and a.value > 'val_2' + ) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src_cbo +#### A masked pattern was here #### +0 val_0 +0 val_0 +0 val_0 +10 val_10 +100 val_100 +100 val_100 +103 val_103 +103 val_103 +104 val_104 +104 val_104 +105 val_105 +11 val_11 +111 val_111 +113 val_113 +113 val_113 +114 val_114 +116 val_116 +118 val_118 +118 val_118 +119 val_119 +119 val_119 +119 val_119 +12 val_12 +12 val_12 +120 val_120 +120 val_120 +125 val_125 +125 val_125 +126 val_126 +128 val_128 +128 val_128 +128 val_128 +129 val_129 +129 val_129 +131 val_131 +133 val_133 +134 val_134 +134 val_134 +136 val_136 +137 val_137 +137 val_137 +138 val_138 +138 val_138 +138 val_138 +138 val_138 +143 val_143 +145 val_145 +146 val_146 +146 val_146 +149 val_149 +149 val_149 +15 val_15 +15 val_15 +150 val_150 +152 val_152 +152 val_152 +153 val_153 +155 val_155 +156 val_156 +157 val_157 +158 val_158 +160 val_160 +162 val_162 +163 val_163 +164 val_164 +164 val_164 +165 val_165 +165 val_165 +166 val_166 +167 val_167 +167 val_167 +167 val_167 +168 val_168 +169 val_169 +169 val_169 +169 val_169 +169 val_169 +17 val_17 +170 val_170 +172 val_172 +172 val_172 +174 val_174 +174 val_174 +175 val_175 +175 val_175 +176 val_176 +176 val_176 +177 val_177 +178 val_178 +179 val_179 +179 val_179 +18 val_18 +18 val_18 +180 val_180 +181 val_181 +183 val_183 +186 val_186 +187 val_187 +187 val_187 +187 val_187 +189 val_189 +19 val_19 +190 val_190 +191 val_191 +191 val_191 +192 val_192 +193 val_193 +193 val_193 +193 val_193 +194 val_194 +195 val_195 +195 val_195 +196 val_196 +197 val_197 +197 val_197 +199 val_199 +199 val_199 +199 val_199 +2 val_2 +PREHOOK: query: -- no agg, corr, having +select * +from src_cbo b +group by key, value +having not exists + (select a.key + from src_cbo a + where b.value = a.value and a.key = b.key and a.value > 'val_12' + ) +PREHOOK: type: QUERY +PREHOOK: Input: default@src_cbo +#### A masked pattern was here #### +POSTHOOK: query: -- no agg, corr, having +select * +from src_cbo b +group by key, value +having not exists + (select a.key + from src_cbo a + where b.value = a.value and a.key = b.key and a.value > 'val_12' + ) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src_cbo +#### A masked pattern was here #### +0 val_0 +10 val_10 +100 val_100 +103 val_103 +104 val_104 +105 val_105 +11 val_11 +111 val_111 +113 val_113 +114 val_114 +116 val_116 +118 val_118 +119 val_119 +12 val_12 +PREHOOK: query: -- 19. SubQueries Exists +-- view test +create view cv1 as +select * +from src_cbo b +where exists + (select a.key + from src_cbo a + where b.value = a.value and a.key = b.key and a.value > 'val_9') +PREHOOK: type: CREATEVIEW +PREHOOK: Input: default@src_cbo +PREHOOK: Output: database:default +PREHOOK: Output: default@cv1 +POSTHOOK: query: -- 19. 
SubQueries Exists +-- view test +create view cv1 as +select * +from src_cbo b +where exists + (select a.key + from src_cbo a + where b.value = a.value and a.key = b.key and a.value > 'val_9') +POSTHOOK: type: CREATEVIEW +POSTHOOK: Input: default@src_cbo +POSTHOOK: Output: database:default +POSTHOOK: Output: default@cv1 +PREHOOK: query: select * from cv1 +PREHOOK: type: QUERY +PREHOOK: Input: default@cv1 +PREHOOK: Input: default@src_cbo +#### A masked pattern was here #### +POSTHOOK: query: select * from cv1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cv1 +POSTHOOK: Input: default@src_cbo +#### A masked pattern was here #### +90 val_90 +90 val_90 +90 val_90 +92 val_92 +95 val_95 +95 val_95 +96 val_96 +97 val_97 +97 val_97 +98 val_98 +98 val_98 +PREHOOK: query: -- sq in from +select * +from (select * + from src_cbo b + where exists + (select a.key + from src_cbo a + where b.value = a.value and a.key = b.key and a.value > 'val_9') + ) a +PREHOOK: type: QUERY +PREHOOK: Input: default@src_cbo +#### A masked pattern was here #### +POSTHOOK: query: -- sq in from +select * +from (select * + from src_cbo b + where exists + (select a.key + from src_cbo a + where b.value = a.value and a.key = b.key and a.value > 'val_9') + ) a +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src_cbo +#### A masked pattern was here #### +90 val_90 +90 val_90 +90 val_90 +92 val_92 +95 val_95 +95 val_95 +96 val_96 +97 val_97 +97 val_97 +98 val_98 +98 val_98 +PREHOOK: query: -- sq in from, having +select * +from (select b.key, count(*) + from src_cbo b + group by b.key + having exists + (select a.key + from src_cbo a + where a.key = b.key and a.value > 'val_9' + ) +) a +PREHOOK: type: QUERY +PREHOOK: Input: default@src_cbo +#### A masked pattern was here #### +POSTHOOK: query: -- sq in from, having +select * +from (select b.key, count(*) + from src_cbo b + group by b.key + having exists + (select a.key + from src_cbo a + where a.key = b.key and a.value > 'val_9' + ) +) a +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src_cbo +#### A masked pattern was here #### +90 3 +92 1 +95 2 +96 1 +97 2 +98 2 Index: ql/src/test/results/clientpositive/cbo_subq_in.q.out =================================================================== --- ql/src/test/results/clientpositive/cbo_subq_in.q.out (revision 0) +++ ql/src/test/results/clientpositive/cbo_subq_in.q.out (working copy) @@ -0,0 +1,149 @@ +PREHOOK: query: -- 17. SubQueries In +-- non agg, non corr +select * +from src_cbo +where src_cbo.key in (select key from src_cbo s1 where s1.key > '9') +PREHOOK: type: QUERY +PREHOOK: Input: default@src_cbo +#### A masked pattern was here #### +POSTHOOK: query: -- 17. 
SubQueries In +-- non agg, non corr +select * +from src_cbo +where src_cbo.key in (select key from src_cbo s1 where s1.key > '9') +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src_cbo +#### A masked pattern was here #### +90 val_90 +90 val_90 +90 val_90 +92 val_92 +95 val_95 +95 val_95 +96 val_96 +97 val_97 +97 val_97 +98 val_98 +98 val_98 +PREHOOK: query: -- agg, corr +-- add back once rank issue fixed for cbo + +-- distinct, corr +select * +from src_cbo b +where b.key in + (select distinct a.key + from src_cbo a + where b.value = a.value and a.key > '9' + ) +PREHOOK: type: QUERY +PREHOOK: Input: default@src_cbo +#### A masked pattern was here #### +POSTHOOK: query: -- agg, corr +-- add back once rank issue fixed for cbo + +-- distinct, corr +select * +from src_cbo b +where b.key in + (select distinct a.key + from src_cbo a + where b.value = a.value and a.key > '9' + ) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src_cbo +#### A masked pattern was here #### +90 val_90 +90 val_90 +90 val_90 +92 val_92 +95 val_95 +95 val_95 +96 val_96 +97 val_97 +97 val_97 +98 val_98 +98 val_98 +PREHOOK: query: -- non agg, corr, with join in Parent Query +select p.p_partkey, li.l_suppkey +from (select distinct l_partkey as p_partkey from lineitem) p join lineitem li on p.p_partkey = li.l_partkey +where li.l_linenumber = 1 and + li.l_orderkey in (select l_orderkey from lineitem where l_shipmode = 'AIR' and l_linenumber = li.l_linenumber) +PREHOOK: type: QUERY +PREHOOK: Input: default@lineitem +#### A masked pattern was here #### +POSTHOOK: query: -- non agg, corr, with join in Parent Query +select p.p_partkey, li.l_suppkey +from (select distinct l_partkey as p_partkey from lineitem) p join lineitem li on p.p_partkey = li.l_partkey +where li.l_linenumber = 1 and + li.l_orderkey in (select l_orderkey from lineitem where l_shipmode = 'AIR' and l_linenumber = li.l_linenumber) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@lineitem +#### A masked pattern was here #### +4297 1798 +108570 8571 +PREHOOK: query: -- where and having +-- Plan is: +-- Stage 1: b semijoin sq1:src_cbo (subquery in where) +-- Stage 2: group by Stage 1 o/p +-- Stage 5: group by on sq2:src_cbo (subquery in having) +-- Stage 6: Stage 2 o/p semijoin Stage 5 +select key, value, count(*) +from src_cbo b +where b.key in (select key from src_cbo where src_cbo.key > '8') +group by key, value +having count(*) in (select count(*) from src_cbo s1 where s1.key > '9' group by s1.key ) +PREHOOK: type: QUERY +PREHOOK: Input: default@src_cbo +#### A masked pattern was here #### +POSTHOOK: query: -- where and having +-- Plan is: +-- Stage 1: b semijoin sq1:src_cbo (subquery in where) +-- Stage 2: group by Stage 1 o/p +-- Stage 5: group by on sq2:src_cbo (subquery in having) +-- Stage 6: Stage 2 o/p semijoin Stage 5 +select key, value, count(*) +from src_cbo b +where b.key in (select key from src_cbo where src_cbo.key > '8') +group by key, value +having count(*) in (select count(*) from src_cbo s1 where s1.key > '9' group by s1.key ) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src_cbo +#### A masked pattern was here #### +80 val_80 1 +96 val_96 1 +92 val_92 1 +9 val_9 1 +87 val_87 1 +86 val_86 1 +85 val_85 1 +82 val_82 1 +84 val_84 2 +95 val_95 2 +83 val_83 2 +98 val_98 2 +97 val_97 2 +90 val_90 3 +PREHOOK: query: -- non agg, non corr, windowing +select p_mfgr, p_name, avg(p_size) +from part +group by p_mfgr, p_name +having p_name in + (select first_value(p_name) over(partition by p_mfgr order by p_size) from part) +PREHOOK: type: QUERY 
+PREHOOK: Input: default@part +#### A masked pattern was here #### +POSTHOOK: query: -- non agg, non corr, windowing +select p_mfgr, p_name, avg(p_size) +from part +group by p_mfgr, p_name +having p_name in + (select first_value(p_name) over(partition by p_mfgr order by p_size) from part) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@part +#### A masked pattern was here #### +Manufacturer#1 almond antique burnished rose metallic 2.0 +Manufacturer#3 almond antique misty red olive 1.0 +Manufacturer#5 almond antique sky peru orange 2.0 +Manufacturer#2 almond aquamarine midnight light salmon 2.0 +Manufacturer#4 almond aquamarine yellow dodger mint 7.0 Index: ql/src/test/results/clientpositive/cbo_subq_not_in.q.out =================================================================== --- ql/src/test/results/clientpositive/cbo_subq_not_in.q.out (revision 0) +++ ql/src/test/results/clientpositive/cbo_subq_not_in.q.out (working copy) @@ -0,0 +1,365 @@ +PREHOOK: query: -- 16. SubQueries Not In +-- non agg, non corr +select * +from src_cbo +where src_cbo.key not in + ( select key from src_cbo s1 + where s1.key > '2' + ) order by key +PREHOOK: type: QUERY +PREHOOK: Input: default@src_cbo +#### A masked pattern was here #### +POSTHOOK: query: -- 16. SubQueries Not In +-- non agg, non corr +select * +from src_cbo +where src_cbo.key not in + ( select key from src_cbo s1 + where s1.key > '2' + ) order by key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src_cbo +#### A masked pattern was here #### +0 val_0 +0 val_0 +0 val_0 +10 val_10 +100 val_100 +100 val_100 +103 val_103 +103 val_103 +104 val_104 +104 val_104 +105 val_105 +11 val_11 +111 val_111 +113 val_113 +113 val_113 +114 val_114 +116 val_116 +118 val_118 +118 val_118 +119 val_119 +119 val_119 +119 val_119 +12 val_12 +12 val_12 +120 val_120 +120 val_120 +125 val_125 +125 val_125 +126 val_126 +128 val_128 +128 val_128 +128 val_128 +129 val_129 +129 val_129 +131 val_131 +133 val_133 +134 val_134 +134 val_134 +136 val_136 +137 val_137 +137 val_137 +138 val_138 +138 val_138 +138 val_138 +138 val_138 +143 val_143 +145 val_145 +146 val_146 +146 val_146 +149 val_149 +149 val_149 +15 val_15 +15 val_15 +150 val_150 +152 val_152 +152 val_152 +153 val_153 +155 val_155 +156 val_156 +157 val_157 +158 val_158 +160 val_160 +162 val_162 +163 val_163 +164 val_164 +164 val_164 +165 val_165 +165 val_165 +166 val_166 +167 val_167 +167 val_167 +167 val_167 +168 val_168 +169 val_169 +169 val_169 +169 val_169 +169 val_169 +17 val_17 +170 val_170 +172 val_172 +172 val_172 +174 val_174 +174 val_174 +175 val_175 +175 val_175 +176 val_176 +176 val_176 +177 val_177 +178 val_178 +179 val_179 +179 val_179 +18 val_18 +18 val_18 +180 val_180 +181 val_181 +183 val_183 +186 val_186 +187 val_187 +187 val_187 +187 val_187 +189 val_189 +19 val_19 +190 val_190 +191 val_191 +191 val_191 +192 val_192 +193 val_193 +193 val_193 +193 val_193 +194 val_194 +195 val_195 +195 val_195 +196 val_196 +197 val_197 +197 val_197 +199 val_199 +199 val_199 +199 val_199 +2 val_2 +PREHOOK: query: -- non agg, corr +select p_mfgr, b.p_name, p_size +from part b +where b.p_name not in + (select p_name + from (select p_mfgr, p_name, p_size as r from part) a + where r < 10 and b.p_mfgr = a.p_mfgr + ) +PREHOOK: type: QUERY +PREHOOK: Input: default@part +#### A masked pattern was here #### +POSTHOOK: query: -- non agg, corr +select p_mfgr, b.p_name, p_size +from part b +where b.p_name not in + (select p_name + from (select p_mfgr, p_name, p_size as r from part) a + where r < 10 and b.p_mfgr = a.p_mfgr + ) 
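Where IN maps to a semijoin, NOT IN over a null-free subquery behaves like an anti-join. A hand-written equivalent of the first, uncorrelated case below (illustrative only; it assumes the inner query yields no NULL keys):

    -- Equivalent of:
    --   select * from src_cbo where src_cbo.key not in (select key from src_cbo s1 where s1.key > '2')
    SELECT b.*
    FROM src_cbo b
    LEFT OUTER JOIN (SELECT DISTINCT key FROM src_cbo WHERE key > '2') sq
      ON b.key = sq.key
    WHERE sq.key IS NULL;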
+POSTHOOK: type: QUERY +POSTHOOK: Input: default@part +#### A masked pattern was here #### +Manufacturer#5 almond antique blue firebrick mint 31 +Manufacturer#3 almond antique chartreuse khaki white 17 +Manufacturer#1 almond antique chartreuse lavender yellow 34 +Manufacturer#3 almond antique forest lavender goldenrod 14 +Manufacturer#4 almond antique gainsboro frosted violet 10 +Manufacturer#3 almond antique metallic orange dim 19 +Manufacturer#3 almond antique olive coral navajo 45 +Manufacturer#2 almond antique violet chocolate turquoise 14 +Manufacturer#4 almond antique violet mint lemon 39 +Manufacturer#2 almond antique violet turquoise frosted 40 +Manufacturer#1 almond aquamarine burnished black steel 28 +Manufacturer#5 almond aquamarine dodger light gainsboro 46 +Manufacturer#4 almond aquamarine floral ivory bisque 27 +Manufacturer#1 almond aquamarine pink moccasin thistle 42 +Manufacturer#2 almond aquamarine rose maroon antique 25 +Manufacturer#2 almond aquamarine sandy cyan gainsboro 18 +Manufacturer#4 almond azure aquamarine papaya violet 12 +Manufacturer#5 almond azure blanched chiffon midnight 23 +PREHOOK: query: -- agg, non corr +select p_name, p_size +from +part where part.p_size not in + (select avg(p_size) + from (select p_size from part) a + where p_size < 10 + ) order by p_name +PREHOOK: type: QUERY +PREHOOK: Input: default@part +#### A masked pattern was here #### +POSTHOOK: query: -- agg, non corr +select p_name, p_size +from +part where part.p_size not in + (select avg(p_size) + from (select p_size from part) a + where p_size < 10 + ) order by p_name +POSTHOOK: type: QUERY +POSTHOOK: Input: default@part +#### A masked pattern was here #### +almond antique blue firebrick mint 31 +almond antique burnished rose metallic 2 +almond antique burnished rose metallic 2 +almond antique chartreuse khaki white 17 +almond antique chartreuse lavender yellow 34 +almond antique forest lavender goldenrod 14 +almond antique gainsboro frosted violet 10 +almond antique medium spring khaki 6 +almond antique metallic orange dim 19 +almond antique misty red olive 1 +almond antique olive coral navajo 45 +almond antique salmon chartreuse burlywood 6 +almond antique sky peru orange 2 +almond antique violet chocolate turquoise 14 +almond antique violet mint lemon 39 +almond antique violet turquoise frosted 40 +almond aquamarine burnished black steel 28 +almond aquamarine dodger light gainsboro 46 +almond aquamarine floral ivory bisque 27 +almond aquamarine midnight light salmon 2 +almond aquamarine pink moccasin thistle 42 +almond aquamarine rose maroon antique 25 +almond aquamarine sandy cyan gainsboro 18 +almond aquamarine yellow dodger mint 7 +almond azure aquamarine papaya violet 12 +almond azure blanched chiffon midnight 23 +PREHOOK: query: -- agg, corr +select p_mfgr, p_name, p_size +from part b where b.p_size not in + (select min(p_size) + from (select p_mfgr, p_size from part) a + where p_size < 10 and b.p_mfgr = a.p_mfgr + ) order by p_name +PREHOOK: type: QUERY +PREHOOK: Input: default@part +#### A masked pattern was here #### +POSTHOOK: query: -- agg, corr +select p_mfgr, p_name, p_size +from part b where b.p_size not in + (select min(p_size) + from (select p_mfgr, p_size from part) a + where p_size < 10 and b.p_mfgr = a.p_mfgr + ) order by p_name +POSTHOOK: type: QUERY +POSTHOOK: Input: default@part +#### A masked pattern was here #### +Manufacturer#5 almond antique blue firebrick mint 31 +Manufacturer#3 almond antique chartreuse khaki white 17 +Manufacturer#1 almond antique 
chartreuse lavender yellow 34 +Manufacturer#3 almond antique forest lavender goldenrod 14 +Manufacturer#4 almond antique gainsboro frosted violet 10 +Manufacturer#5 almond antique medium spring khaki 6 +Manufacturer#3 almond antique metallic orange dim 19 +Manufacturer#3 almond antique olive coral navajo 45 +Manufacturer#1 almond antique salmon chartreuse burlywood 6 +Manufacturer#2 almond antique violet chocolate turquoise 14 +Manufacturer#4 almond antique violet mint lemon 39 +Manufacturer#2 almond antique violet turquoise frosted 40 +Manufacturer#1 almond aquamarine burnished black steel 28 +Manufacturer#5 almond aquamarine dodger light gainsboro 46 +Manufacturer#4 almond aquamarine floral ivory bisque 27 +Manufacturer#1 almond aquamarine pink moccasin thistle 42 +Manufacturer#2 almond aquamarine rose maroon antique 25 +Manufacturer#2 almond aquamarine sandy cyan gainsboro 18 +Manufacturer#4 almond azure aquamarine papaya violet 12 +Manufacturer#5 almond azure blanched chiffon midnight 23 +PREHOOK: query: -- non agg, non corr, Group By in Parent Query +select li.l_partkey, count(*) +from lineitem li +where li.l_linenumber = 1 and + li.l_orderkey not in (select l_orderkey from lineitem where l_shipmode = 'AIR') +group by li.l_partkey +PREHOOK: type: QUERY +PREHOOK: Input: default@lineitem +#### A masked pattern was here #### +POSTHOOK: query: -- non agg, non corr, Group By in Parent Query +select li.l_partkey, count(*) +from lineitem li +where li.l_linenumber = 1 and + li.l_orderkey not in (select l_orderkey from lineitem where l_shipmode = 'AIR') +group by li.l_partkey +POSTHOOK: type: QUERY +POSTHOOK: Input: default@lineitem +#### A masked pattern was here #### +450 1 +7068 1 +21636 1 +22630 1 +59694 1 +61931 1 +85951 1 +88035 1 +88362 1 +106170 1 +119477 1 +119767 1 +123076 1 +139636 1 +175839 1 +182052 1 +PREHOOK: query: -- add null check test from sq_notin.q once HIVE-7721 resolved. + +-- non agg, corr, having +select b.p_mfgr, min(p_retailprice) +from part b +group by b.p_mfgr +having b.p_mfgr not in + (select p_mfgr + from (select p_mfgr, min(p_retailprice) l, max(p_retailprice) r, avg(p_retailprice) a from part group by p_mfgr) a + where min(p_retailprice) = l and r - l > 600 + ) + order by b.p_mfgr +PREHOOK: type: QUERY +PREHOOK: Input: default@part +#### A masked pattern was here #### +POSTHOOK: query: -- add null check test from sq_notin.q once HIVE-7721 resolved. 
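The deferred null-check test matters because NOT IN follows SQL's three-valued logic: if the subquery produces even one NULL, the predicate can never be true and the outer query returns no rows. A minimal illustration with hypothetical tables t and s:

    -- 1 NOT IN (2, NULL) evaluates to NULL (unknown), not TRUE,
    -- so a single NULL in s.y empties the result:
    SELECT * FROM t WHERE t.x NOT IN (SELECT y FROM s);
    -- Defensive rewrite that restores the intuitive anti-join behavior:
    SELECT * FROM t WHERE t.x NOT IN (SELECT y FROM s WHERE y IS NOT NULL);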
+ +-- non agg, corr, having +select b.p_mfgr, min(p_retailprice) +from part b +group by b.p_mfgr +having b.p_mfgr not in + (select p_mfgr + from (select p_mfgr, min(p_retailprice) l, max(p_retailprice) r, avg(p_retailprice) a from part group by p_mfgr) a + where min(p_retailprice) = l and r - l > 600 + ) + order by b.p_mfgr +POSTHOOK: type: QUERY +POSTHOOK: Input: default@part +#### A masked pattern was here #### +Manufacturer#1 1173.15 +Manufacturer#2 1690.68 +PREHOOK: query: -- agg, non corr, having +select b.p_mfgr, min(p_retailprice) +from part b +group by b.p_mfgr +having b.p_mfgr not in + (select p_mfgr + from part a + group by p_mfgr + having max(p_retailprice) - min(p_retailprice) > 600 + ) + order by b.p_mfgr +PREHOOK: type: QUERY +PREHOOK: Input: default@part +#### A masked pattern was here #### +POSTHOOK: query: -- agg, non corr, having +select b.p_mfgr, min(p_retailprice) +from part b +group by b.p_mfgr +having b.p_mfgr not in + (select p_mfgr + from part a + group by p_mfgr + having max(p_retailprice) - min(p_retailprice) > 600 + ) + order by b.p_mfgr +POSTHOOK: type: QUERY +POSTHOOK: Input: default@part +#### A masked pattern was here #### +Manufacturer#1 1173.15 +Manufacturer#2 1690.68 Index: ql/src/test/results/clientpositive/cbo_udf_udaf.q.out =================================================================== --- ql/src/test/results/clientpositive/cbo_udf_udaf.q.out (revision 0) +++ ql/src/test/results/clientpositive/cbo_udf_udaf.q.out (working copy) @@ -0,0 +1,121 @@ +PREHOOK: query: -- 8. Test UDF/UDAF +select count(*), count(c_int), sum(c_int), avg(c_int), max(c_int), min(c_int) from cbo_t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: -- 8. 
Test UDF/UDAF +select count(*), count(c_int), sum(c_int), avg(c_int), max(c_int), min(c_int) from cbo_t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +20 18 18 1.0 1 1 +PREHOOK: query: select count(*), count(c_int), sum(c_int), avg(c_int), max(c_int), min(c_int), case c_int when 0 then 1 when 1 then 2 else 3 end, sum(case c_int when 0 then 1 when 1 then 2 else 3 end) from cbo_t1 group by c_int +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select count(*), count(c_int), sum(c_int), avg(c_int), max(c_int), min(c_int), case c_int when 0 then 1 when 1 then 2 else 3 end, sum(case c_int when 0 then 1 when 1 then 2 else 3 end) from cbo_t1 group by c_int +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +2 0 NULL NULL NULL NULL 3 6 +18 18 18 1.0 1 1 2 36 +PREHOOK: query: select * from (select count(*) as a, count(distinct c_int) as b, sum(c_int) as c, avg(c_int) as d, max(c_int) as e, min(c_int) as f from cbo_t1) cbo_t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select count(*) as a, count(distinct c_int) as b, sum(c_int) as c, avg(c_int) as d, max(c_int) as e, min(c_int) as f from cbo_t1) cbo_t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +20 1 18 1.0 1 1 +PREHOOK: query: select * from (select count(*) as a, count(distinct c_int) as b, sum(c_int) as c, avg(c_int) as d, max(c_int) as e, min(c_int) as f, case c_int when 0 then 1 when 1 then 2 else 3 end as g, sum(case c_int when 0 then 1 when 1 then 2 else 3 end) as h from cbo_t1 group by c_int) cbo_t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select count(*) as a, count(distinct c_int) as b, sum(c_int) as c, avg(c_int) as d, max(c_int) as e, min(c_int) as f, case c_int when 0 then 1 when 1 then 2 else 3 end as g, sum(case c_int when 0 then 1 when 1 then 2 else 3 end) as h from cbo_t1 group by c_int) cbo_t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +2 0 NULL NULL NULL NULL 3 6 +18 1 18 1.0 1 1 2 36 +PREHOOK: query: select f,a,e,b from (select count(*) as a, count(c_int) as b, sum(c_int) as c, avg(c_int) as d, max(c_int) as e, min(c_int) as f from cbo_t1) cbo_t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select f,a,e,b from (select count(*) as a, count(c_int) as b, sum(c_int) as c, avg(c_int) as d, max(c_int) as e, min(c_int) as f from cbo_t1) cbo_t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +1 20 1 18 +PREHOOK: query: select f,a,e,b from (select count(*) as a, count(distinct c_int) as b, sum(distinct c_int) as c, avg(distinct c_int) as d, max(distinct c_int) as e, min(distinct c_int) as f from cbo_t1) cbo_t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select 
f,a,e,b from (select count(*) as a, count(distinct c_int) as b, sum(distinct c_int) as c, avg(distinct c_int) as d, max(distinct c_int) as e, min(distinct c_int) as f from cbo_t1) cbo_t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +1 20 1 1 +PREHOOK: query: select count(c_int) as a, avg(c_float), key from cbo_t1 group by key +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select count(c_int) as a, avg(c_float), key from cbo_t1 group by key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +2 1.0 1 +2 1.0 1 +12 1.0 1 +2 1.0 1 +0 NULL null +PREHOOK: query: select count(distinct c_int) as a, avg(c_float) from cbo_t1 group by c_float +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select count(distinct c_int) as a, avg(c_float) from cbo_t1 group by c_float +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +0 NULL +1 1.0 +PREHOOK: query: select count(distinct c_int) as a, avg(c_float) from cbo_t1 group by c_int +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select count(distinct c_int) as a, avg(c_float) from cbo_t1 group by c_int +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +0 NULL +1 1.0 +PREHOOK: query: select count(distinct c_int) as a, avg(c_float) from cbo_t1 group by c_float, c_int +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select count(distinct c_int) as a, avg(c_float) from cbo_t1 group by c_float, c_int +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +0 NULL +1 1.0 Index: ql/src/test/results/clientpositive/cbo_union.q.out =================================================================== --- ql/src/test/results/clientpositive/cbo_union.q.out (revision 0) +++ ql/src/test/results/clientpositive/cbo_union.q.out (working copy) @@ -0,0 +1,916 @@ +PREHOOK: query: -- 11. Union All +select * from cbo_t1 order by key, c_boolean, value, dt union all select * from cbo_t2 order by key, c_boolean, value, dt +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +PREHOOK: Input: default@cbo_t2 +PREHOOK: Input: default@cbo_t2@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: -- 11. 
Union All +select * from cbo_t1 order by key, c_boolean, value, dt union all select * from cbo_t2 order by key, c_boolean, value, dt +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +POSTHOOK: Input: default@cbo_t2 +POSTHOOK: Input: default@cbo_t2@dt=2014 +#### A masked pattern was here #### + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 +1 1 1 1.0 false 2014 +1 1 1 1.0 false 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +2 2 2 2.0 true 2014 +2 2 2 2.0 true 2014 +2 2 2 2.0 true 2014 +2 2 2 2.0 true 2014 +2 2 2 2.0 true 2014 +null null NULL NULL NULL 2014 +null null NULL NULL NULL 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 +1 1 1 1.0 false 2014 +1 1 1 1.0 false 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +null null NULL NULL NULL 2014 +null null NULL NULL NULL 2014 +PREHOOK: query: select key from (select key, c_int from (select * from cbo_t1 union all select * from cbo_t2 where cbo_t2.key >=0)r1 union all select key, c_int from cbo_t3)r2 where key >=0 order by key +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +PREHOOK: Input: default@cbo_t2 +PREHOOK: Input: default@cbo_t2@dt=2014 +PREHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### +POSTHOOK: query: select key from (select key, c_int from (select * from cbo_t1 union all select * from cbo_t2 where cbo_t2.key >=0)r1 union all select key, c_int from cbo_t3)r2 where key >=0 order by key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +POSTHOOK: Input: default@cbo_t2 +POSTHOOK: Input: default@cbo_t2@dt=2014 +POSTHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +2 +2 +2 +2 +2 +2 +2 +3 +3 +3 +PREHOOK: query: select r2.key from (select key, c_int from (select key, c_int from cbo_t1 union all select key, c_int from cbo_t3 )r1 union all select key, c_int from cbo_t3)r2 join (select key, c_int from (select * from cbo_t1 union all select * from cbo_t2 where cbo_t2.key >=0)r1 union all select key, c_int from cbo_t3)r3 on r2.key=r3.key where r3.key >=0 order by r2.key +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +PREHOOK: Input: default@cbo_t2 +PREHOOK: Input: default@cbo_t2@dt=2014 +PREHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### +POSTHOOK: query: select r2.key from (select key, c_int from (select key, c_int from cbo_t1 union all select key, c_int from cbo_t3 )r1 union all select key, c_int from cbo_t3)r2 join (select key, c_int from (select * from cbo_t1 union all select * from cbo_t2 where cbo_t2.key >=0)r1 union all select key, c_int from cbo_t3)r3 on r2.key=r3.key where r3.key >=0 order by r2.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +POSTHOOK: Input: default@cbo_t2 +POSTHOOK: Input: default@cbo_t2@dt=2014 +POSTHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 
+ 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 Index: ql/src/test/results/clientpositive/cbo_views.q.out =================================================================== --- ql/src/test/results/clientpositive/cbo_views.q.out (revision 0) +++ ql/src/test/results/clientpositive/cbo_views.q.out (working copy) @@ -0,0 +1,237 @@ +PREHOOK: query: -- 10. Test views +create view v1 as select c_int, value, c_boolean, dt from cbo_t1 +PREHOOK: type: CREATEVIEW +PREHOOK: Input: default@cbo_t1 +PREHOOK: Output: database:default +PREHOOK: Output: default@v1 +POSTHOOK: query: -- 10. 
Test views +create view v1 as select c_int, value, c_boolean, dt from cbo_t1 +POSTHOOK: type: CREATEVIEW +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Output: database:default +POSTHOOK: Output: default@v1 +PREHOOK: query: create view v2 as select c_int, value from cbo_t2 +PREHOOK: type: CREATEVIEW +PREHOOK: Input: default@cbo_t2 +PREHOOK: Output: database:default +PREHOOK: Output: default@v2 +POSTHOOK: query: create view v2 as select c_int, value from cbo_t2 +POSTHOOK: type: CREATEVIEW +POSTHOOK: Input: default@cbo_t2 +POSTHOOK: Output: database:default +POSTHOOK: Output: default@v2 +PREHOOK: query: select value from v1 where c_boolean=false +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +PREHOOK: Input: default@v1 +#### A masked pattern was here #### +POSTHOOK: query: select value from v1 where c_boolean=false +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +POSTHOOK: Input: default@v1 +#### A masked pattern was here #### +1 +1 +PREHOOK: query: select max(c_int) from v1 group by (c_boolean) +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +PREHOOK: Input: default@v1 +#### A masked pattern was here #### +POSTHOOK: query: select max(c_int) from v1 group by (c_boolean) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +POSTHOOK: Input: default@v1 +#### A masked pattern was here #### +NULL +1 +1 +PREHOOK: query: select count(v1.c_int) from v1 join cbo_t2 on v1.c_int = cbo_t2.c_int +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +PREHOOK: Input: default@cbo_t2 +PREHOOK: Input: default@cbo_t2@dt=2014 +PREHOOK: Input: default@v1 +#### A masked pattern was here #### +POSTHOOK: query: select count(v1.c_int) from v1 join cbo_t2 on v1.c_int = cbo_t2.c_int +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +POSTHOOK: Input: default@cbo_t2 +POSTHOOK: Input: default@cbo_t2@dt=2014 +POSTHOOK: Input: default@v1 +#### A masked pattern was here #### +234 +PREHOOK: query: select count(v1.c_int) from v1 join v2 on v1.c_int = v2.c_int +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +PREHOOK: Input: default@cbo_t2 +PREHOOK: Input: default@cbo_t2@dt=2014 +PREHOOK: Input: default@v1 +PREHOOK: Input: default@v2 +#### A masked pattern was here #### +POSTHOOK: query: select count(v1.c_int) from v1 join v2 on v1.c_int = v2.c_int +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +POSTHOOK: Input: default@cbo_t2 +POSTHOOK: Input: default@cbo_t2@dt=2014 +POSTHOOK: Input: default@v1 +POSTHOOK: Input: default@v2 +#### A masked pattern was here #### +234 +PREHOOK: query: select count(*) from v1 a join v1 b on a.value = b.value +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +PREHOOK: Input: default@v1 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from v1 a join v1 b on a.value = b.value +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +POSTHOOK: Input: default@v1 +#### A masked pattern was here #### +160 +PREHOOK: query: create view v3 as select v1.value val from v1 join cbo_t1 on v1.c_boolean = cbo_t1.c_boolean +PREHOOK: type: CREATEVIEW +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@v1 +PREHOOK: Output: database:default 
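Note how every query over a view lists both the view and its base table (with partitions) as inputs: Hive views are purely logical, expanded into the referencing query at compile time rather than materialized. A small sketch of the pattern these cases exercise (v_demo is a hypothetical name):

    CREATE VIEW v_demo AS
    SELECT key, c_int FROM cbo_t1 WHERE dt = '2014';
    -- Reads go straight to cbo_t1's partition, which is why the hooks
    -- report default@cbo_t1 and default@cbo_t1@dt=2014 alongside the view.
    SELECT count(*) FROM v_demo WHERE key = '1';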
+PREHOOK: Output: default@v3 +POSTHOOK: query: create view v3 as select v1.value val from v1 join cbo_t1 on v1.c_boolean = cbo_t1.c_boolean +POSTHOOK: type: CREATEVIEW +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@v1 +POSTHOOK: Output: database:default +POSTHOOK: Output: default@v3 +PREHOOK: query: select count(val) from v3 where val != '1' +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +PREHOOK: Input: default@v1 +PREHOOK: Input: default@v3 +#### A masked pattern was here #### +POSTHOOK: query: select count(val) from v3 where val != '1' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +POSTHOOK: Input: default@v1 +POSTHOOK: Input: default@v3 +#### A masked pattern was here #### +96 +PREHOOK: query: with q1 as ( select key from cbo_t1 where key = '1') +select count(*) from q1 +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: with q1 as ( select key from cbo_t1 where key = '1') +select count(*) from q1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +12 +PREHOOK: query: with q1 as ( select value from v1 where c_boolean = false) +select count(value) from q1 +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +PREHOOK: Input: default@v1 +#### A masked pattern was here #### +POSTHOOK: query: with q1 as ( select value from v1 where c_boolean = false) +select count(value) from q1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +POSTHOOK: Input: default@v1 +#### A masked pattern was here #### +2 +PREHOOK: query: create view v4 as +with q1 as ( select key,c_int from cbo_t1 where key = '1') +select * from q1 +PREHOOK: type: CREATEVIEW +PREHOOK: Input: default@cbo_t1 +PREHOOK: Output: database:default +PREHOOK: Output: default@v4 +POSTHOOK: query: create view v4 as +with q1 as ( select key,c_int from cbo_t1 where key = '1') +select * from q1 +POSTHOOK: type: CREATEVIEW +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Output: database:default +POSTHOOK: Output: default@v4 +PREHOOK: query: with q1 as ( select c_int from q2 where c_boolean = false), +q2 as ( select c_int,c_boolean from v1 where value = '1') +select sum(c_int) from (select c_int from q1) a +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +PREHOOK: Input: default@v1 +#### A masked pattern was here #### +POSTHOOK: query: with q1 as ( select c_int from q2 where c_boolean = false), +q2 as ( select c_int,c_boolean from v1 where value = '1') +select sum(c_int) from (select c_int from q1) a +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +POSTHOOK: Input: default@v1 +#### A masked pattern was here #### +2 +PREHOOK: query: with q1 as ( select cbo_t1.c_int c_int from q2 join cbo_t1 where q2.c_int = cbo_t1.c_int and cbo_t1.dt='2014'), +q2 as ( select c_int,c_boolean from v1 where value = '1' or dt = '14') +select count(*) from q1 join q2 join v4 on q1.c_int = q2.c_int and v4.c_int = q2.c_int +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +PREHOOK: Input: default@v1 +PREHOOK: Input: default@v4 +#### A masked pattern was here #### +POSTHOOK: query: with q1 as ( select cbo_t1.c_int c_int from q2 join cbo_t1 where q2.c_int = cbo_t1.c_int 
and cbo_t1.dt='2014'), +q2 as ( select c_int,c_boolean from v1 where value = '1' or dt = '14') +select count(*) from q1 join q2 join v4 on q1.c_int = q2.c_int and v4.c_int = q2.c_int +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +POSTHOOK: Input: default@v1 +POSTHOOK: Input: default@v4 +#### A masked pattern was here #### +31104 +PREHOOK: query: drop view v1 +PREHOOK: type: DROPVIEW +PREHOOK: Input: default@v1 +PREHOOK: Output: default@v1 +POSTHOOK: query: drop view v1 +POSTHOOK: type: DROPVIEW +POSTHOOK: Input: default@v1 +POSTHOOK: Output: default@v1 +PREHOOK: query: drop view v2 +PREHOOK: type: DROPVIEW +PREHOOK: Input: default@v2 +PREHOOK: Output: default@v2 +POSTHOOK: query: drop view v2 +POSTHOOK: type: DROPVIEW +POSTHOOK: Input: default@v2 +POSTHOOK: Output: default@v2 +PREHOOK: query: drop view v3 +PREHOOK: type: DROPVIEW +PREHOOK: Input: default@v3 +PREHOOK: Output: default@v3 +POSTHOOK: query: drop view v3 +POSTHOOK: type: DROPVIEW +POSTHOOK: Input: default@v3 +POSTHOOK: Output: default@v3 +PREHOOK: query: drop view v4 +PREHOOK: type: DROPVIEW +PREHOOK: Input: default@v4 +PREHOOK: Output: default@v4 +POSTHOOK: query: drop view v4 +POSTHOOK: type: DROPVIEW +POSTHOOK: Input: default@v4 +POSTHOOK: Output: default@v4 Index: ql/src/test/results/clientpositive/cbo_windowing.q.out =================================================================== --- ql/src/test/results/clientpositive/cbo_windowing.q.out (revision 0) +++ ql/src/test/results/clientpositive/cbo_windowing.q.out (working copy) @@ -0,0 +1,289 @@ +PREHOOK: query: -- 9. Test Windowing Functions +select count(c_int) over() from cbo_t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: -- 9. 
Test Windowing Functions +select count(c_int) over() from cbo_t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +PREHOOK: query: select count(c_int) over(), sum(c_float) over(), max(c_int) over(), min(c_int) over(), row_number() over(), rank() over(), dense_rank() over(), percent_rank() over(), lead(c_int, 2, c_int) over(), lag(c_float, 2, c_float) over() from cbo_t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select count(c_int) over(), sum(c_float) over(), max(c_int) over(), min(c_int) over(), row_number() over(), rank() over(), dense_rank() over(), percent_rank() over(), lead(c_int, 2, c_int) over(), lag(c_float, 2, c_float) over() from cbo_t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +18 18.0 1 1 1 1 1 0.0 1 NULL +18 18.0 1 1 2 1 1 0.0 1 NULL +18 18.0 1 1 3 1 1 0.0 1 NULL +18 18.0 1 1 4 1 1 0.0 1 NULL +18 18.0 1 1 5 1 1 0.0 1 1.0 +18 18.0 1 1 6 1 1 0.0 1 1.0 +18 18.0 1 1 7 1 1 0.0 1 1.0 +18 18.0 1 1 8 1 1 0.0 1 1.0 +18 18.0 1 1 9 1 1 0.0 1 1.0 +18 18.0 1 1 10 1 1 0.0 1 1.0 +18 18.0 1 1 11 1 1 0.0 1 1.0 +18 18.0 1 1 12 1 1 0.0 1 1.0 +18 18.0 1 1 13 1 1 0.0 1 1.0 +18 18.0 1 1 14 1 1 0.0 1 1.0 +18 18.0 1 1 15 1 1 0.0 1 1.0 +18 18.0 1 1 16 1 1 0.0 1 1.0 +18 18.0 1 1 17 1 1 0.0 1 1.0 +18 18.0 1 1 18 1 1 0.0 1 1.0 +18 18.0 1 1 19 1 1 0.0 1 1.0 +18 18.0 1 1 20 1 1 0.0 1 1.0 +PREHOOK: query: select * from (select count(c_int) over(), sum(c_float) over(), max(c_int) over(), min(c_int) over(), row_number() over(), rank() over(), dense_rank() over(), percent_rank() over(), lead(c_int, 2, c_int) over(), lag(c_float, 2, c_float) over() from cbo_t1) cbo_t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select count(c_int) over(), sum(c_float) over(), max(c_int) over(), min(c_int) over(), row_number() over(), rank() over(), dense_rank() over(), percent_rank() over(), lead(c_int, 2, c_int) over(), lag(c_float, 2, c_float) over() from cbo_t1) cbo_t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +18 18.0 1 1 1 1 1 0.0 1 NULL +18 18.0 1 1 2 1 1 0.0 1 NULL +18 18.0 1 1 3 1 1 0.0 1 NULL +18 18.0 1 1 4 1 1 0.0 1 NULL +18 18.0 1 1 5 1 1 0.0 1 1.0 +18 18.0 1 1 6 1 1 0.0 1 1.0 +18 18.0 1 1 7 1 1 0.0 1 1.0 +18 18.0 1 1 8 1 1 0.0 1 1.0 +18 18.0 1 1 9 1 1 0.0 1 1.0 +18 18.0 1 1 10 1 1 0.0 1 1.0 +18 18.0 1 1 11 1 1 0.0 1 1.0 +18 18.0 1 1 12 1 1 0.0 1 1.0 +18 18.0 1 1 13 1 1 0.0 1 1.0 +18 18.0 1 1 14 1 1 0.0 1 1.0 +18 18.0 1 1 15 1 1 0.0 1 1.0 +18 18.0 1 1 16 1 1 0.0 1 1.0 +18 18.0 1 1 17 1 1 0.0 1 1.0 +18 18.0 1 1 18 1 1 0.0 1 1.0 +18 18.0 1 1 19 1 1 0.0 1 1.0 +18 18.0 1 1 20 1 1 0.0 1 1.0 +PREHOOK: query: select x from (select count(c_int) over() as x, sum(c_float) over() from cbo_t1) cbo_t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select x from (select count(c_int) over() as x, sum(c_float) over() from cbo_t1) cbo_t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### 
+18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +PREHOOK: query: select 1+sum(c_int) over() from cbo_t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select 1+sum(c_int) over() from cbo_t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +19 +19 +19 +19 +19 +19 +19 +19 +19 +19 +19 +19 +19 +19 +19 +19 +19 +19 +19 +19 +PREHOOK: query: select sum(c_int)+sum(sum(c_int)) over() from cbo_t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select sum(c_int)+sum(sum(c_int)) over() from cbo_t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +36 +PREHOOK: query: select * from (select max(c_int) over (partition by key order by value Rows UNBOUNDED PRECEDING), min(c_int) over (partition by key order by value rows current row), count(c_int) over(partition by key order by value ROWS 1 PRECEDING), avg(value) over (partition by key order by value Rows between unbounded preceding and unbounded following), sum(value) over (partition by key order by value rows between unbounded preceding and current row), avg(c_float) over (partition by key order by value Rows between 1 preceding and unbounded following), sum(c_float) over (partition by key order by value rows between 1 preceding and current row), max(c_float) over (partition by key order by value rows between 1 preceding and unbounded following), min(c_float) over (partition by key order by value rows between 1 preceding and 1 following) from cbo_t1) cbo_t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select max(c_int) over (partition by key order by value Rows UNBOUNDED PRECEDING), min(c_int) over (partition by key order by value rows current row), count(c_int) over(partition by key order by value ROWS 1 PRECEDING), avg(value) over (partition by key order by value Rows between unbounded preceding and unbounded following), sum(value) over (partition by key order by value rows between unbounded preceding and current row), avg(c_float) over (partition by key order by value Rows between 1 preceding and unbounded following), sum(c_float) over (partition by key order by value rows between 1 preceding and current row), max(c_float) over (partition by key order by value rows between 1 preceding and unbounded following), min(c_float) over (partition by key order by value rows between 1 preceding and 1 following) from cbo_t1) cbo_t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +1 1 1 1.0 1.0 1.0 1.0 1.0 1.0 +1 1 2 1.0 2.0 1.0 2.0 1.0 1.0 +1 1 1 1.0 1.0 1.0 1.0 1.0 1.0 +1 1 2 1.0 2.0 1.0 2.0 1.0 1.0 +1 1 1 1.0 1.0 1.0 1.0 1.0 1.0 +1 1 2 1.0 2.0 1.0 2.0 1.0 1.0 +1 1 2 1.0 3.0 1.0 2.0 1.0 1.0 +1 1 2 1.0 4.0 1.0 2.0 1.0 1.0 +1 1 2 1.0 5.0 1.0 2.0 1.0 1.0 +1 1 2 1.0 6.0 1.0 2.0 1.0 1.0 +1 1 2 1.0 7.0 1.0 2.0 1.0 1.0 +1 1 2 1.0 8.0 1.0 2.0 1.0 1.0 +1 1 2 1.0 9.0 1.0 2.0 1.0 1.0 +1 1 2 1.0 10.0 1.0 2.0 1.0 1.0 +1 1 2 1.0 11.0 1.0 2.0 1.0 1.0 +1 1 2 1.0 12.0 1.0 2.0 1.0 1.0 +1 1 1 1.0 1.0 1.0 1.0 1.0 1.0 +1 1 2 1.0 2.0 1.0 2.0 1.0 1.0 +NULL NULL 0 NULL 0.0 NULL NULL NULL 
NULL +NULL NULL 0 NULL 0.0 NULL NULL NULL NULL +PREHOOK: query: select i, a, h, b, c, d, e, f, g, a as x, a +1 as y from (select max(c_int) over (partition by key order by value range UNBOUNDED PRECEDING) a, min(c_int) over (partition by key order by value range current row) b, count(c_int) over(partition by key order by value range 1 PRECEDING) c, avg(value) over (partition by key order by value range between unbounded preceding and unbounded following) d, sum(value) over (partition by key order by value range between unbounded preceding and current row) e, avg(c_float) over (partition by key order by value range between 1 preceding and unbounded following) f, sum(c_float) over (partition by key order by value range between 1 preceding and current row) g, max(c_float) over (partition by key order by value range between 1 preceding and unbounded following) h, min(c_float) over (partition by key order by value range between 1 preceding and 1 following) i from cbo_t1) cbo_t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select i, a, h, b, c, d, e, f, g, a as x, a +1 as y from (select max(c_int) over (partition by key order by value range UNBOUNDED PRECEDING) a, min(c_int) over (partition by key order by value range current row) b, count(c_int) over(partition by key order by value range 1 PRECEDING) c, avg(value) over (partition by key order by value range between unbounded preceding and unbounded following) d, sum(value) over (partition by key order by value range between unbounded preceding and current row) e, avg(c_float) over (partition by key order by value range between 1 preceding and unbounded following) f, sum(c_float) over (partition by key order by value range between 1 preceding and current row) g, max(c_float) over (partition by key order by value range between 1 preceding and unbounded following) h, min(c_float) over (partition by key order by value range between 1 preceding and 1 following) i from cbo_t1) cbo_t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +1.0 1 1.0 1 2 1.0 2.0 1.0 2.0 1 2 +1.0 1 1.0 1 2 1.0 2.0 1.0 2.0 1 2 +1.0 1 1.0 1 2 1.0 2.0 1.0 2.0 1 2 +1.0 1 1.0 1 2 1.0 2.0 1.0 2.0 1 2 +1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2 +1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2 +1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2 +1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2 +1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2 +1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2 +1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2 +1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2 +1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2 +1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2 +1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2 +1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2 +1.0 1 1.0 1 2 1.0 2.0 1.0 2.0 1 2 +1.0 1 1.0 1 2 1.0 2.0 1.0 2.0 1 2 +NULL NULL NULL NULL 0 NULL 0.0 NULL NULL NULL NULL +NULL NULL NULL NULL 0 NULL 0.0 NULL NULL NULL NULL +PREHOOK: query: select *, rank() over(partition by key order by value) as rr from src1 +PREHOOK: type: QUERY +PREHOOK: Input: default@src1 +#### A masked pattern was here #### +POSTHOOK: query: select *, rank() over(partition by key order by value) as rr from src1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src1 +#### A masked pattern was here #### + 1 + 1 + 1 + 1 + val_165 5 + val_193 6 + val_265 7 + val_27 8 + val_409 9 + val_484 10 +128 1 +146 val_146 1 +150 val_150 1 +213 val_213 1 +224 1 +238 val_238 1 +255 val_255 1 +273 val_273 1 +278 val_278 1 +311 val_311 1 +369 1 
+401 val_401 1 +406 val_406 1 +66 val_66 1 +98 val_98 1 +PREHOOK: query: select *, rank() over(partition by key order by value) from src1 +PREHOOK: type: QUERY +PREHOOK: Input: default@src1 +#### A masked pattern was here #### +POSTHOOK: query: select *, rank() over(partition by key order by value) from src1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src1 +#### A masked pattern was here #### + 1 + 1 + 1 + 1 + val_165 5 + val_193 6 + val_265 7 + val_27 8 + val_409 9 + val_484 10 +128 1 +146 val_146 1 +150 val_150 1 +213 val_213 1 +224 1 +238 val_238 1 +255 val_255 1 +273 val_273 1 +278 val_278 1 +311 val_311 1 +369 1 +401 val_401 1 +406 val_406 1 +66 val_66 1 +98 val_98 1 Index: ql/src/test/results/clientpositive/describe_table_json.q.out =================================================================== --- ql/src/test/results/clientpositive/describe_table_json.q.out (revision 1637277) +++ ql/src/test/results/clientpositive/describe_table_json.q.out (working copy) @@ -10,7 +10,7 @@ PREHOOK: type: SHOWTABLES POSTHOOK: query: SHOW TABLES POSTHOOK: type: SHOWTABLES -{"tables":["alltypesorc","jsontable","src","src1","src_json","src_sequencefile","src_thrift","srcbucket","srcbucket2","srcpart"]} +{"tables":["alltypesorc","cbo_t1","cbo_t2","cbo_t3","jsontable","lineitem","part","src","src1","src_cbo","src_json","src_sequencefile","src_thrift","srcbucket","srcbucket2","srcpart"]} PREHOOK: query: SHOW TABLES LIKE 'json*' PREHOOK: type: SHOWTABLES POSTHOOK: query: SHOW TABLES LIKE 'json*' Index: ql/src/test/results/clientpositive/dynpart_sort_optimization_acid.q.out =================================================================== --- ql/src/test/results/clientpositive/dynpart_sort_optimization_acid.q.out (revision 0) +++ ql/src/test/results/clientpositive/dynpart_sort_optimization_acid.q.out (working copy) @@ -0,0 +1,1264 @@ +PREHOOK: query: -- single level partition, sorted dynamic partition disabled +drop table acid +PREHOOK: type: DROPTABLE +POSTHOOK: query: -- single level partition, sorted dynamic partition disabled +drop table acid +POSTHOOK: type: DROPTABLE +PREHOOK: query: CREATE TABLE acid(key string, value string) PARTITIONED BY(ds string) CLUSTERED BY(key) INTO 2 BUCKETS STORED AS ORC TBLPROPERTIES ('transactional'='true') +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@acid +POSTHOOK: query: CREATE TABLE acid(key string, value string) PARTITIONED BY(ds string) CLUSTERED BY(key) INTO 2 BUCKETS STORED AS ORC TBLPROPERTIES ('transactional'='true') +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@acid +PREHOOK: query: insert into table acid partition(ds) select key,value,ds from srcpart +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +PREHOOK: Output: default@acid +POSTHOOK: query: insert into table acid partition(ds) select key,value,ds from srcpart +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +POSTHOOK: Output: default@acid@ds=2008-04-08 +POSTHOOK: Output: default@acid@ds=2008-04-09 +POSTHOOK: Lineage: acid PARTITION(ds=2008-04-08).key SIMPLE 
[(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: acid PARTITION(ds=2008-04-08).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: acid PARTITION(ds=2008-04-09).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: acid PARTITION(ds=2008-04-09).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: select count(*) from acid where ds='2008-04-08' +PREHOOK: type: QUERY +PREHOOK: Input: default@acid +PREHOOK: Input: default@acid@ds=2008-04-08 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from acid where ds='2008-04-08' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@acid +POSTHOOK: Input: default@acid@ds=2008-04-08 +#### A masked pattern was here #### +1000 +PREHOOK: query: insert into table acid partition(ds='2008-04-08') values("foo", "bar") +PREHOOK: type: QUERY +PREHOOK: Input: default@values__tmp__table__1 +PREHOOK: Output: default@acid@ds=2008-04-08 +POSTHOOK: query: insert into table acid partition(ds='2008-04-08') values("foo", "bar") +POSTHOOK: type: QUERY +POSTHOOK: Input: default@values__tmp__table__1 +POSTHOOK: Output: default@acid@ds=2008-04-08 +POSTHOOK: Lineage: acid PARTITION(ds=2008-04-08).key SIMPLE [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ] +POSTHOOK: Lineage: acid PARTITION(ds=2008-04-08).value SIMPLE [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col2, type:string, comment:), ] +PREHOOK: query: select count(*) from acid where ds='2008-04-08' +PREHOOK: type: QUERY +PREHOOK: Input: default@acid +PREHOOK: Input: default@acid@ds=2008-04-08 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from acid where ds='2008-04-08' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@acid +POSTHOOK: Input: default@acid@ds=2008-04-08 +#### A masked pattern was here #### +1001 +PREHOOK: query: explain update acid set key = 'foo' where value = 'bar' and ds='2008-04-08' +PREHOOK: type: QUERY +POSTHOOK: query: explain update acid set key = 'foo' where value = 'bar' and ds='2008-04-08' +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + Stage-2 depends on stages: Stage-0 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: acid + Filter Operator + predicate: (value = 'bar') (type: boolean) + Select Operator + expressions: ROW__ID (type: struct), 'foo' (type: string) + outputColumnNames: _col0, _col1 + Reduce Output Operator + key expressions: _col0 (type: struct) + sort order: + + Map-reduce partition columns: UDFToInteger(_col0) (type: int) + value expressions: _col1 (type: string) + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: struct), VALUE._col0 (type: string), 'bar' (type: string), '2008-04-08' (type: string) + outputColumnNames: _col0, _col1, _col2, _col3 + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.acid + + Stage: Stage-0 + Move Operator + tables: + partition: + ds + replace: false + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: 
org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.acid + + Stage: Stage-2 + Stats-Aggr Operator + +PREHOOK: query: update acid set key = 'foo' where value = 'bar' and ds='2008-04-08' +PREHOOK: type: QUERY +PREHOOK: Input: default@acid +PREHOOK: Input: default@acid@ds=2008-04-08 +PREHOOK: Output: default@acid@ds=2008-04-08 +POSTHOOK: query: update acid set key = 'foo' where value = 'bar' and ds='2008-04-08' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@acid +POSTHOOK: Input: default@acid@ds=2008-04-08 +POSTHOOK: Output: default@acid@ds=2008-04-08 +PREHOOK: query: select count(*) from acid where ds='2008-04-08' +PREHOOK: type: QUERY +PREHOOK: Input: default@acid +PREHOOK: Input: default@acid@ds=2008-04-08 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from acid where ds='2008-04-08' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@acid +POSTHOOK: Input: default@acid@ds=2008-04-08 +#### A masked pattern was here #### +1001 +PREHOOK: query: explain update acid set key = 'foo' where value = 'bar' and ds in ('2008-04-08') +PREHOOK: type: QUERY +POSTHOOK: query: explain update acid set key = 'foo' where value = 'bar' and ds in ('2008-04-08') +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + Stage-2 depends on stages: Stage-0 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: acid + Filter Operator + predicate: (value = 'bar') (type: boolean) + Select Operator + expressions: ROW__ID (type: struct), 'foo' (type: string), ds (type: string) + outputColumnNames: _col0, _col1, _col3 + Reduce Output Operator + key expressions: _col0 (type: struct) + sort order: + + Map-reduce partition columns: UDFToInteger(_col0) (type: int) + value expressions: _col1 (type: string), _col3 (type: string) + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: struct), VALUE._col0 (type: string), 'bar' (type: string), VALUE._col2 (type: string) + outputColumnNames: _col0, _col1, _col2, _col3 + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.acid + + Stage: Stage-0 + Move Operator + tables: + partition: + ds + replace: false + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.acid + + Stage: Stage-2 + Stats-Aggr Operator + +PREHOOK: query: update acid set key = 'foo' where value = 'bar' and ds in ('2008-04-08') +PREHOOK: type: QUERY +PREHOOK: Input: default@acid +PREHOOK: Input: default@acid@ds=2008-04-08 +PREHOOK: Output: default@acid@ds=2008-04-08 +POSTHOOK: query: update acid set key = 'foo' where value = 'bar' and ds in ('2008-04-08') +POSTHOOK: type: QUERY +POSTHOOK: Input: default@acid +POSTHOOK: Input: default@acid@ds=2008-04-08 +POSTHOOK: Output: default@acid@ds=2008-04-08 +PREHOOK: query: select count(*) from acid where ds in ('2008-04-08') +PREHOOK: type: QUERY +PREHOOK: Input: default@acid +PREHOOK: Input: default@acid@ds=2008-04-08 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from acid where ds in ('2008-04-08') +POSTHOOK: type: QUERY +POSTHOOK: Input: default@acid +POSTHOOK: Input: default@acid@ds=2008-04-08 +#### A masked pattern was here #### +1001 +PREHOOK: query: delete from 
acid where key = 'foo' and ds='2008-04-08' +PREHOOK: type: QUERY +PREHOOK: Input: default@acid +PREHOOK: Input: default@acid@ds=2008-04-08 +PREHOOK: Output: default@acid@ds=2008-04-08 +POSTHOOK: query: delete from acid where key = 'foo' and ds='2008-04-08' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@acid +POSTHOOK: Input: default@acid@ds=2008-04-08 +POSTHOOK: Output: default@acid@ds=2008-04-08 +PREHOOK: query: select count(*) from acid where ds='2008-04-08' +PREHOOK: type: QUERY +PREHOOK: Input: default@acid +PREHOOK: Input: default@acid@ds=2008-04-08 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from acid where ds='2008-04-08' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@acid +POSTHOOK: Input: default@acid@ds=2008-04-08 +#### A masked pattern was here #### +1000 +PREHOOK: query: -- single level partition, sorted dynamic partition enabled +drop table acid +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@acid +PREHOOK: Output: default@acid +POSTHOOK: query: -- single level partition, sorted dynamic partition enabled +drop table acid +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@acid +POSTHOOK: Output: default@acid +PREHOOK: query: CREATE TABLE acid(key string, value string) PARTITIONED BY(ds string) CLUSTERED BY(key) INTO 2 BUCKETS STORED AS ORC TBLPROPERTIES ('transactional'='true') +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@acid +POSTHOOK: query: CREATE TABLE acid(key string, value string) PARTITIONED BY(ds string) CLUSTERED BY(key) INTO 2 BUCKETS STORED AS ORC TBLPROPERTIES ('transactional'='true') +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@acid +PREHOOK: query: insert into table acid partition(ds) select key,value,ds from srcpart +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +PREHOOK: Output: default@acid +POSTHOOK: query: insert into table acid partition(ds) select key,value,ds from srcpart +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +POSTHOOK: Output: default@acid@ds=2008-04-08 +POSTHOOK: Output: default@acid@ds=2008-04-09 +POSTHOOK: Lineage: acid PARTITION(ds=2008-04-08).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: acid PARTITION(ds=2008-04-08).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: acid PARTITION(ds=2008-04-09).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: acid PARTITION(ds=2008-04-09).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: select count(*) from acid where ds='2008-04-08' +PREHOOK: type: QUERY +PREHOOK: Input: default@acid +PREHOOK: Input: default@acid@ds=2008-04-08 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from acid where ds='2008-04-08' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@acid +POSTHOOK: Input: default@acid@ds=2008-04-08 +#### A masked pattern was here #### +1000 +PREHOOK: query: insert into table acid 
partition(ds='2008-04-08') values("foo", "bar") +PREHOOK: type: QUERY +PREHOOK: Input: default@values__tmp__table__2 +PREHOOK: Output: default@acid@ds=2008-04-08 +POSTHOOK: query: insert into table acid partition(ds='2008-04-08') values("foo", "bar") +POSTHOOK: type: QUERY +POSTHOOK: Input: default@values__tmp__table__2 +POSTHOOK: Output: default@acid@ds=2008-04-08 +POSTHOOK: Lineage: acid PARTITION(ds=2008-04-08).key SIMPLE [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col1, type:string, comment:), ] +POSTHOOK: Lineage: acid PARTITION(ds=2008-04-08).value SIMPLE [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col2, type:string, comment:), ] +PREHOOK: query: select count(*) from acid where ds='2008-04-08' +PREHOOK: type: QUERY +PREHOOK: Input: default@acid +PREHOOK: Input: default@acid@ds=2008-04-08 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from acid where ds='2008-04-08' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@acid +POSTHOOK: Input: default@acid@ds=2008-04-08 +#### A masked pattern was here #### +1001 +PREHOOK: query: explain update acid set key = 'foo' where value = 'bar' and ds='2008-04-08' +PREHOOK: type: QUERY +POSTHOOK: query: explain update acid set key = 'foo' where value = 'bar' and ds='2008-04-08' +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + Stage-2 depends on stages: Stage-0 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: acid + Filter Operator + predicate: (value = 'bar') (type: boolean) + Select Operator + expressions: ROW__ID (type: struct), 'foo' (type: string) + outputColumnNames: _col0, _col1 + Reduce Output Operator + key expressions: _col0 (type: struct) + sort order: + + Map-reduce partition columns: UDFToInteger(_col0) (type: int) + value expressions: _col1 (type: string) + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: struct), VALUE._col0 (type: string), 'bar' (type: string), '2008-04-08' (type: string) + outputColumnNames: _col0, _col1, _col2, _col3 + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.acid + + Stage: Stage-0 + Move Operator + tables: + partition: + ds + replace: false + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.acid + + Stage: Stage-2 + Stats-Aggr Operator + +PREHOOK: query: update acid set key = 'foo' where value = 'bar' and ds='2008-04-08' +PREHOOK: type: QUERY +PREHOOK: Input: default@acid +PREHOOK: Input: default@acid@ds=2008-04-08 +PREHOOK: Output: default@acid@ds=2008-04-08 +POSTHOOK: query: update acid set key = 'foo' where value = 'bar' and ds='2008-04-08' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@acid +POSTHOOK: Input: default@acid@ds=2008-04-08 +POSTHOOK: Output: default@acid@ds=2008-04-08 +PREHOOK: query: select count(*) from acid where ds='2008-04-08' +PREHOOK: type: QUERY +PREHOOK: Input: default@acid +PREHOOK: Input: default@acid@ds=2008-04-08 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from acid where ds='2008-04-08' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@acid +POSTHOOK: Input: default@acid@ds=2008-04-08 +#### A 
masked pattern was here #### +1001 +PREHOOK: query: explain update acid set key = 'foo' where value = 'bar' and ds in ('2008-04-08') +PREHOOK: type: QUERY +POSTHOOK: query: explain update acid set key = 'foo' where value = 'bar' and ds in ('2008-04-08') +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + Stage-2 depends on stages: Stage-0 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: acid + Filter Operator + predicate: (value = 'bar') (type: boolean) + Select Operator + expressions: ROW__ID (type: struct), 'foo' (type: string), ds (type: string) + outputColumnNames: _col0, _col1, _col3 + Reduce Output Operator + key expressions: _col0 (type: struct) + sort order: + + Map-reduce partition columns: UDFToInteger(_col0) (type: int) + value expressions: _col1 (type: string), _col3 (type: string) + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: struct), VALUE._col0 (type: string), 'bar' (type: string), VALUE._col2 (type: string) + outputColumnNames: _col0, _col1, _col2, _col3 + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.acid + + Stage: Stage-0 + Move Operator + tables: + partition: + ds + replace: false + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.acid + + Stage: Stage-2 + Stats-Aggr Operator + +PREHOOK: query: update acid set key = 'foo' where value = 'bar' and ds in ('2008-04-08') +PREHOOK: type: QUERY +PREHOOK: Input: default@acid +PREHOOK: Input: default@acid@ds=2008-04-08 +PREHOOK: Output: default@acid@ds=2008-04-08 +POSTHOOK: query: update acid set key = 'foo' where value = 'bar' and ds in ('2008-04-08') +POSTHOOK: type: QUERY +POSTHOOK: Input: default@acid +POSTHOOK: Input: default@acid@ds=2008-04-08 +POSTHOOK: Output: default@acid@ds=2008-04-08 +PREHOOK: query: select count(*) from acid where ds in ('2008-04-08') +PREHOOK: type: QUERY +PREHOOK: Input: default@acid +PREHOOK: Input: default@acid@ds=2008-04-08 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from acid where ds in ('2008-04-08') +POSTHOOK: type: QUERY +POSTHOOK: Input: default@acid +POSTHOOK: Input: default@acid@ds=2008-04-08 +#### A masked pattern was here #### +1001 +PREHOOK: query: delete from acid where key = 'foo' and ds='2008-04-08' +PREHOOK: type: QUERY +PREHOOK: Input: default@acid +PREHOOK: Input: default@acid@ds=2008-04-08 +PREHOOK: Output: default@acid@ds=2008-04-08 +POSTHOOK: query: delete from acid where key = 'foo' and ds='2008-04-08' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@acid +POSTHOOK: Input: default@acid@ds=2008-04-08 +POSTHOOK: Output: default@acid@ds=2008-04-08 +PREHOOK: query: select count(*) from acid where ds='2008-04-08' +PREHOOK: type: QUERY +PREHOOK: Input: default@acid +PREHOOK: Input: default@acid@ds=2008-04-08 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from acid where ds='2008-04-08' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@acid +POSTHOOK: Input: default@acid@ds=2008-04-08 +#### A masked pattern was here #### +1000 +PREHOOK: query: -- 2 level partition, sorted dynamic partition disabled +drop table acid +PREHOOK: type: DROPTABLE +PREHOOK: 
Input: default@acid +PREHOOK: Output: default@acid +POSTHOOK: query: -- 2 level partition, sorted dynamic partition disabled +drop table acid +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@acid +POSTHOOK: Output: default@acid +PREHOOK: query: CREATE TABLE acid(key string, value string) PARTITIONED BY(ds string, hr int) CLUSTERED BY(key) INTO 2 BUCKETS STORED AS ORC TBLPROPERTIES ('transactional'='true') +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@acid +POSTHOOK: query: CREATE TABLE acid(key string, value string) PARTITIONED BY(ds string, hr int) CLUSTERED BY(key) INTO 2 BUCKETS STORED AS ORC TBLPROPERTIES ('transactional'='true') +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@acid +PREHOOK: query: insert into table acid partition(ds,hr) select * from srcpart +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +PREHOOK: Output: default@acid +POSTHOOK: query: insert into table acid partition(ds,hr) select * from srcpart +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +POSTHOOK: Output: default@acid@ds=2008-04-08/hr=11 +POSTHOOK: Output: default@acid@ds=2008-04-08/hr=12 +POSTHOOK: Output: default@acid@ds=2008-04-09/hr=11 +POSTHOOK: Output: default@acid@ds=2008-04-09/hr=12 +POSTHOOK: Lineage: acid PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: acid PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: acid PARTITION(ds=2008-04-08,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: acid PARTITION(ds=2008-04-08,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: acid PARTITION(ds=2008-04-09,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: acid PARTITION(ds=2008-04-09,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: acid PARTITION(ds=2008-04-09,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: acid PARTITION(ds=2008-04-09,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: select count(*) from acid where ds='2008-04-08' and hr=11 +PREHOOK: type: QUERY +PREHOOK: Input: default@acid +PREHOOK: Input: default@acid@ds=2008-04-08/hr=11 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from acid where ds='2008-04-08' and hr=11 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@acid +POSTHOOK: Input: default@acid@ds=2008-04-08/hr=11 +#### A masked pattern was here #### +500 +PREHOOK: query: insert into table acid partition(ds='2008-04-08',hr=11) values("foo", "bar") +PREHOOK: type: QUERY +PREHOOK: Input: default@values__tmp__table__3 +PREHOOK: Output: default@acid@ds=2008-04-08/hr=11 +POSTHOOK: query: insert into table acid 
partition(ds='2008-04-08',hr=11) values("foo", "bar") +POSTHOOK: type: QUERY +POSTHOOK: Input: default@values__tmp__table__3 +POSTHOOK: Output: default@acid@ds=2008-04-08/hr=11 +POSTHOOK: Lineage: acid PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(values__tmp__table__3)values__tmp__table__3.FieldSchema(name:tmp_values_col1, type:string, comment:), ] +POSTHOOK: Lineage: acid PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(values__tmp__table__3)values__tmp__table__3.FieldSchema(name:tmp_values_col2, type:string, comment:), ] +PREHOOK: query: select count(*) from acid where ds='2008-04-08' and hr=11 +PREHOOK: type: QUERY +PREHOOK: Input: default@acid +PREHOOK: Input: default@acid@ds=2008-04-08/hr=11 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from acid where ds='2008-04-08' and hr=11 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@acid +POSTHOOK: Input: default@acid@ds=2008-04-08/hr=11 +#### A masked pattern was here #### +501 +PREHOOK: query: explain update acid set key = 'foo' where value = 'bar' and ds='2008-04-08' and hr=11 +PREHOOK: type: QUERY +POSTHOOK: query: explain update acid set key = 'foo' where value = 'bar' and ds='2008-04-08' and hr=11 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + Stage-2 depends on stages: Stage-0 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: acid + Filter Operator + predicate: (value = 'bar') (type: boolean) + Select Operator + expressions: ROW__ID (type: struct), 'foo' (type: string) + outputColumnNames: _col0, _col1 + Reduce Output Operator + key expressions: _col0 (type: struct) + sort order: + + Map-reduce partition columns: UDFToInteger(_col0) (type: int) + value expressions: _col1 (type: string) + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: struct), VALUE._col0 (type: string), 'bar' (type: string), '2008-04-08' (type: string), 11 (type: int) + outputColumnNames: _col0, _col1, _col2, _col3, _col4 + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.acid + + Stage: Stage-0 + Move Operator + tables: + partition: + ds + hr + replace: false + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.acid + + Stage: Stage-2 + Stats-Aggr Operator + +PREHOOK: query: update acid set key = 'foo' where value = 'bar' and ds='2008-04-08' and hr=11 +PREHOOK: type: QUERY +PREHOOK: Input: default@acid +PREHOOK: Input: default@acid@ds=2008-04-08/hr=11 +PREHOOK: Output: default@acid@ds=2008-04-08/hr=11 +POSTHOOK: query: update acid set key = 'foo' where value = 'bar' and ds='2008-04-08' and hr=11 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@acid +POSTHOOK: Input: default@acid@ds=2008-04-08/hr=11 +POSTHOOK: Output: default@acid@ds=2008-04-08/hr=11 +PREHOOK: query: select count(*) from acid where ds='2008-04-08' and hr=11 +PREHOOK: type: QUERY +PREHOOK: Input: default@acid +PREHOOK: Input: default@acid@ds=2008-04-08/hr=11 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from acid where ds='2008-04-08' and hr=11 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@acid +POSTHOOK: Input: default@acid@ds=2008-04-08/hr=11 +#### A masked pattern was here 
#### +501 +PREHOOK: query: explain update acid set key = 'foo' where value = 'bar' and ds='2008-04-08' and hr>=11 +PREHOOK: type: QUERY +POSTHOOK: query: explain update acid set key = 'foo' where value = 'bar' and ds='2008-04-08' and hr>=11 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + Stage-2 depends on stages: Stage-0 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: acid + Filter Operator + predicate: (value = 'bar') (type: boolean) + Select Operator + expressions: ROW__ID (type: struct), 'foo' (type: string), hr (type: int) + outputColumnNames: _col0, _col1, _col4 + Reduce Output Operator + key expressions: _col0 (type: struct) + sort order: + + Map-reduce partition columns: UDFToInteger(_col0) (type: int) + value expressions: _col1 (type: string), _col4 (type: int) + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: struct), VALUE._col0 (type: string), 'bar' (type: string), '2008-04-08' (type: string), VALUE._col3 (type: int) + outputColumnNames: _col0, _col1, _col2, _col3, _col4 + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.acid + + Stage: Stage-0 + Move Operator + tables: + partition: + ds + hr + replace: false + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.acid + + Stage: Stage-2 + Stats-Aggr Operator + +PREHOOK: query: update acid set key = 'foo' where value = 'bar' and ds='2008-04-08' and hr>=11 +PREHOOK: type: QUERY +PREHOOK: Input: default@acid +PREHOOK: Input: default@acid@ds=2008-04-08/hr=11 +PREHOOK: Input: default@acid@ds=2008-04-08/hr=12 +PREHOOK: Output: default@acid@ds=2008-04-08/hr=11 +PREHOOK: Output: default@acid@ds=2008-04-08/hr=12 +POSTHOOK: query: update acid set key = 'foo' where value = 'bar' and ds='2008-04-08' and hr>=11 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@acid +POSTHOOK: Input: default@acid@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@acid@ds=2008-04-08/hr=12 +POSTHOOK: Output: default@acid@ds=2008-04-08/hr=11 +POSTHOOK: Output: default@acid@ds=2008-04-08/hr=12 +PREHOOK: query: select count(*) from acid where ds='2008-04-08' and hr>=11 +PREHOOK: type: QUERY +PREHOOK: Input: default@acid +PREHOOK: Input: default@acid@ds=2008-04-08/hr=11 +PREHOOK: Input: default@acid@ds=2008-04-08/hr=12 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from acid where ds='2008-04-08' and hr>=11 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@acid +POSTHOOK: Input: default@acid@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@acid@ds=2008-04-08/hr=12 +#### A masked pattern was here #### +1001 +PREHOOK: query: delete from acid where key = 'foo' and ds='2008-04-08' and hr=11 +PREHOOK: type: QUERY +PREHOOK: Input: default@acid +PREHOOK: Input: default@acid@ds=2008-04-08/hr=11 +PREHOOK: Output: default@acid@ds=2008-04-08/hr=11 +POSTHOOK: query: delete from acid where key = 'foo' and ds='2008-04-08' and hr=11 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@acid +POSTHOOK: Input: default@acid@ds=2008-04-08/hr=11 +POSTHOOK: Output: default@acid@ds=2008-04-08/hr=11 +PREHOOK: query: select count(*) from acid where ds='2008-04-08' and hr=11 +PREHOOK: type: QUERY +PREHOOK: 
Input: default@acid +PREHOOK: Input: default@acid@ds=2008-04-08/hr=11 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from acid where ds='2008-04-08' and hr=11 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@acid +POSTHOOK: Input: default@acid@ds=2008-04-08/hr=11 +#### A masked pattern was here #### +500 +PREHOOK: query: -- 2 level partition, sorted dynamic partition enabled +drop table acid +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@acid +PREHOOK: Output: default@acid +POSTHOOK: query: -- 2 level partition, sorted dynamic partition enabled +drop table acid +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@acid +POSTHOOK: Output: default@acid +PREHOOK: query: CREATE TABLE acid(key string, value string) PARTITIONED BY(ds string, hr int) CLUSTERED BY(key) INTO 2 BUCKETS STORED AS ORC TBLPROPERTIES ('transactional'='true') +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@acid +POSTHOOK: query: CREATE TABLE acid(key string, value string) PARTITIONED BY(ds string, hr int) CLUSTERED BY(key) INTO 2 BUCKETS STORED AS ORC TBLPROPERTIES ('transactional'='true') +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@acid +PREHOOK: query: insert into table acid partition(ds,hr) select * from srcpart +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +PREHOOK: Output: default@acid +POSTHOOK: query: insert into table acid partition(ds,hr) select * from srcpart +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +POSTHOOK: Output: default@acid@ds=2008-04-08/hr=11 +POSTHOOK: Output: default@acid@ds=2008-04-08/hr=12 +POSTHOOK: Output: default@acid@ds=2008-04-09/hr=11 +POSTHOOK: Output: default@acid@ds=2008-04-09/hr=12 +POSTHOOK: Lineage: acid PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: acid PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: acid PARTITION(ds=2008-04-08,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: acid PARTITION(ds=2008-04-08,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: acid PARTITION(ds=2008-04-09,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: acid PARTITION(ds=2008-04-09,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: acid PARTITION(ds=2008-04-09,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: acid PARTITION(ds=2008-04-09,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: select count(*) from acid where ds='2008-04-08' and hr=11 +PREHOOK: type: QUERY +PREHOOK: Input: default@acid +PREHOOK: Input: default@acid@ds=2008-04-08/hr=11 +#### A masked pattern was here #### +POSTHOOK: query: select 
count(*) from acid where ds='2008-04-08' and hr=11 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@acid +POSTHOOK: Input: default@acid@ds=2008-04-08/hr=11 +#### A masked pattern was here #### +500 +PREHOOK: query: insert into table acid partition(ds='2008-04-08',hr=11) values("foo", "bar") +PREHOOK: type: QUERY +PREHOOK: Input: default@values__tmp__table__4 +PREHOOK: Output: default@acid@ds=2008-04-08/hr=11 +POSTHOOK: query: insert into table acid partition(ds='2008-04-08',hr=11) values("foo", "bar") +POSTHOOK: type: QUERY +POSTHOOK: Input: default@values__tmp__table__4 +POSTHOOK: Output: default@acid@ds=2008-04-08/hr=11 +POSTHOOK: Lineage: acid PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(values__tmp__table__4)values__tmp__table__4.FieldSchema(name:tmp_values_col1, type:string, comment:), ] +POSTHOOK: Lineage: acid PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(values__tmp__table__4)values__tmp__table__4.FieldSchema(name:tmp_values_col2, type:string, comment:), ] +PREHOOK: query: select count(*) from acid where ds='2008-04-08' and hr=11 +PREHOOK: type: QUERY +PREHOOK: Input: default@acid +PREHOOK: Input: default@acid@ds=2008-04-08/hr=11 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from acid where ds='2008-04-08' and hr=11 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@acid +POSTHOOK: Input: default@acid@ds=2008-04-08/hr=11 +#### A masked pattern was here #### +501 +PREHOOK: query: explain update acid set key = 'foo' where value = 'bar' and ds='2008-04-08' and hr=11 +PREHOOK: type: QUERY +POSTHOOK: query: explain update acid set key = 'foo' where value = 'bar' and ds='2008-04-08' and hr=11 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + Stage-2 depends on stages: Stage-0 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: acid + Filter Operator + predicate: (value = 'bar') (type: boolean) + Select Operator + expressions: ROW__ID (type: struct), 'foo' (type: string) + outputColumnNames: _col0, _col1 + Reduce Output Operator + key expressions: _col0 (type: struct) + sort order: + + Map-reduce partition columns: UDFToInteger(_col0) (type: int) + value expressions: _col1 (type: string) + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: struct), VALUE._col0 (type: string), 'bar' (type: string), '2008-04-08' (type: string), 11 (type: int) + outputColumnNames: _col0, _col1, _col2, _col3, _col4 + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.acid + + Stage: Stage-0 + Move Operator + tables: + partition: + ds + hr + replace: false + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.acid + + Stage: Stage-2 + Stats-Aggr Operator + +PREHOOK: query: update acid set key = 'foo' where value = 'bar' and ds='2008-04-08' and hr=11 +PREHOOK: type: QUERY +PREHOOK: Input: default@acid +PREHOOK: Input: default@acid@ds=2008-04-08/hr=11 +PREHOOK: Output: default@acid@ds=2008-04-08/hr=11 +POSTHOOK: query: update acid set key = 'foo' where value = 'bar' and ds='2008-04-08' and hr=11 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@acid +POSTHOOK: Input: default@acid@ds=2008-04-08/hr=11 +POSTHOOK: Output: 
default@acid@ds=2008-04-08/hr=11 +PREHOOK: query: select count(*) from acid where ds='2008-04-08' and hr=11 +PREHOOK: type: QUERY +PREHOOK: Input: default@acid +PREHOOK: Input: default@acid@ds=2008-04-08/hr=11 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from acid where ds='2008-04-08' and hr=11 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@acid +POSTHOOK: Input: default@acid@ds=2008-04-08/hr=11 +#### A masked pattern was here #### +501 +PREHOOK: query: explain update acid set key = 'foo' where value = 'bar' and ds='2008-04-08' and hr>=11 +PREHOOK: type: QUERY +POSTHOOK: query: explain update acid set key = 'foo' where value = 'bar' and ds='2008-04-08' and hr>=11 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + Stage-2 depends on stages: Stage-0 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: acid + Filter Operator + predicate: (value = 'bar') (type: boolean) + Select Operator + expressions: ROW__ID (type: struct), 'foo' (type: string), hr (type: int) + outputColumnNames: _col0, _col1, _col4 + Reduce Output Operator + key expressions: _col0 (type: struct) + sort order: + + Map-reduce partition columns: UDFToInteger(_col0) (type: int) + value expressions: _col1 (type: string), _col4 (type: int) + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: struct), VALUE._col0 (type: string), 'bar' (type: string), '2008-04-08' (type: string), VALUE._col3 (type: int) + outputColumnNames: _col0, _col1, _col2, _col3, _col4 + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.acid + + Stage: Stage-0 + Move Operator + tables: + partition: + ds + hr + replace: false + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.acid + + Stage: Stage-2 + Stats-Aggr Operator + +PREHOOK: query: update acid set key = 'foo' where value = 'bar' and ds='2008-04-08' and hr>=11 +PREHOOK: type: QUERY +PREHOOK: Input: default@acid +PREHOOK: Input: default@acid@ds=2008-04-08/hr=11 +PREHOOK: Input: default@acid@ds=2008-04-08/hr=12 +PREHOOK: Output: default@acid@ds=2008-04-08/hr=11 +PREHOOK: Output: default@acid@ds=2008-04-08/hr=12 +POSTHOOK: query: update acid set key = 'foo' where value = 'bar' and ds='2008-04-08' and hr>=11 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@acid +POSTHOOK: Input: default@acid@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@acid@ds=2008-04-08/hr=12 +POSTHOOK: Output: default@acid@ds=2008-04-08/hr=11 +POSTHOOK: Output: default@acid@ds=2008-04-08/hr=12 +PREHOOK: query: select count(*) from acid where ds='2008-04-08' and hr>=11 +PREHOOK: type: QUERY +PREHOOK: Input: default@acid +PREHOOK: Input: default@acid@ds=2008-04-08/hr=11 +PREHOOK: Input: default@acid@ds=2008-04-08/hr=12 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from acid where ds='2008-04-08' and hr>=11 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@acid +POSTHOOK: Input: default@acid@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@acid@ds=2008-04-08/hr=12 +#### A masked pattern was here #### +1001 +PREHOOK: query: delete from acid where key = 'foo' and ds='2008-04-08' and hr=11 +PREHOOK: type: QUERY +PREHOOK: Input: 
default@acid +PREHOOK: Input: default@acid@ds=2008-04-08/hr=11 +PREHOOK: Output: default@acid@ds=2008-04-08/hr=11 +POSTHOOK: query: delete from acid where key = 'foo' and ds='2008-04-08' and hr=11 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@acid +POSTHOOK: Input: default@acid@ds=2008-04-08/hr=11 +POSTHOOK: Output: default@acid@ds=2008-04-08/hr=11 +PREHOOK: query: select count(*) from acid where ds='2008-04-08' and hr=11 +PREHOOK: type: QUERY +PREHOOK: Input: default@acid +PREHOOK: Input: default@acid@ds=2008-04-08/hr=11 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from acid where ds='2008-04-08' and hr=11 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@acid +POSTHOOK: Input: default@acid@ds=2008-04-08/hr=11 +#### A masked pattern was here #### +500 +PREHOOK: query: -- 2 level partition, sorted dynamic partition enabled, constant propagation disabled +drop table acid +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@acid +PREHOOK: Output: default@acid +POSTHOOK: query: -- 2 level partition, sorted dynamic partition enabled, constant propagation disabled +drop table acid +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@acid +POSTHOOK: Output: default@acid +PREHOOK: query: CREATE TABLE acid(key string, value string) PARTITIONED BY(ds string, hr int) CLUSTERED BY(key) INTO 2 BUCKETS STORED AS ORC TBLPROPERTIES ('transactional'='true') +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@acid +POSTHOOK: query: CREATE TABLE acid(key string, value string) PARTITIONED BY(ds string, hr int) CLUSTERED BY(key) INTO 2 BUCKETS STORED AS ORC TBLPROPERTIES ('transactional'='true') +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@acid +PREHOOK: query: insert into table acid partition(ds,hr) select * from srcpart +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +PREHOOK: Output: default@acid +POSTHOOK: query: insert into table acid partition(ds,hr) select * from srcpart +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +POSTHOOK: Output: default@acid@ds=2008-04-08/hr=11 +POSTHOOK: Output: default@acid@ds=2008-04-08/hr=12 +POSTHOOK: Output: default@acid@ds=2008-04-09/hr=11 +POSTHOOK: Output: default@acid@ds=2008-04-09/hr=12 +POSTHOOK: Lineage: acid PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: acid PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: acid PARTITION(ds=2008-04-08,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: acid PARTITION(ds=2008-04-08,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: acid PARTITION(ds=2008-04-09,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: acid PARTITION(ds=2008-04-09,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] 
+POSTHOOK: Lineage: acid PARTITION(ds=2008-04-09,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: acid PARTITION(ds=2008-04-09,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: select count(*) from acid where ds='2008-04-08' and hr=11 +PREHOOK: type: QUERY +PREHOOK: Input: default@acid +PREHOOK: Input: default@acid@ds=2008-04-08/hr=11 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from acid where ds='2008-04-08' and hr=11 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@acid +POSTHOOK: Input: default@acid@ds=2008-04-08/hr=11 +#### A masked pattern was here #### +500 +PREHOOK: query: insert into table acid partition(ds='2008-04-08',hr=11) values("foo", "bar") +PREHOOK: type: QUERY +PREHOOK: Input: default@values__tmp__table__5 +PREHOOK: Output: default@acid@ds=2008-04-08/hr=11 +POSTHOOK: query: insert into table acid partition(ds='2008-04-08',hr=11) values("foo", "bar") +POSTHOOK: type: QUERY +POSTHOOK: Input: default@values__tmp__table__5 +POSTHOOK: Output: default@acid@ds=2008-04-08/hr=11 +POSTHOOK: Lineage: acid PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(values__tmp__table__5)values__tmp__table__5.FieldSchema(name:tmp_values_col1, type:string, comment:), ] +POSTHOOK: Lineage: acid PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(values__tmp__table__5)values__tmp__table__5.FieldSchema(name:tmp_values_col2, type:string, comment:), ] +PREHOOK: query: select count(*) from acid where ds='2008-04-08' and hr=11 +PREHOOK: type: QUERY +PREHOOK: Input: default@acid +PREHOOK: Input: default@acid@ds=2008-04-08/hr=11 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from acid where ds='2008-04-08' and hr=11 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@acid +POSTHOOK: Input: default@acid@ds=2008-04-08/hr=11 +#### A masked pattern was here #### +501 +PREHOOK: query: explain update acid set key = 'foo' where value = 'bar' and ds='2008-04-08' and hr=11 +PREHOOK: type: QUERY +POSTHOOK: query: explain update acid set key = 'foo' where value = 'bar' and ds='2008-04-08' and hr=11 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-2 depends on stages: Stage-1 + Stage-0 depends on stages: Stage-2 + Stage-3 depends on stages: Stage-0 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: acid + Filter Operator + predicate: (value = 'bar') (type: boolean) + Select Operator + expressions: ROW__ID (type: struct), 'foo' (type: string), value (type: string), ds (type: string), hr (type: int) + outputColumnNames: _col0, _col1, _col2, _col3, _col4 + Reduce Output Operator + key expressions: _col0 (type: struct) + sort order: + + value expressions: _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: int) + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: struct), VALUE._col0 (type: string), VALUE._col1 (type: string), VALUE._col2 (type: string), VALUE._col3 (type: int) + outputColumnNames: _col0, _col1, _col2, _col3, _col4 + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe + + Stage: Stage-2 + Map Reduce + Map Operator Tree: + TableScan + Reduce Output Operator + key expressions: _col3 (type: string), _col4 (type: int), 
'_bucket_number' (type: string), _col0 (type: struct) + sort order: ++++ + Map-reduce partition columns: _col3 (type: string), _col4 (type: int) + value expressions: _col0 (type: struct), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: int), '_bucket_number' (type: string) + Reduce Operator Tree: + Extract + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.acid + + Stage: Stage-0 + Move Operator + tables: + partition: + ds + hr + replace: false + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.acid + + Stage: Stage-3 + Stats-Aggr Operator + +PREHOOK: query: update acid set key = 'foo' where value = 'bar' and ds='2008-04-08' and hr=11 +PREHOOK: type: QUERY +PREHOOK: Input: default@acid +PREHOOK: Input: default@acid@ds=2008-04-08/hr=11 +PREHOOK: Output: default@acid@ds=2008-04-08/hr=11 +POSTHOOK: query: update acid set key = 'foo' where value = 'bar' and ds='2008-04-08' and hr=11 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@acid +POSTHOOK: Input: default@acid@ds=2008-04-08/hr=11 +POSTHOOK: Output: default@acid@ds=2008-04-08/hr=11 +PREHOOK: query: select count(*) from acid where ds='2008-04-08' and hr=11 +PREHOOK: type: QUERY +PREHOOK: Input: default@acid +PREHOOK: Input: default@acid@ds=2008-04-08/hr=11 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from acid where ds='2008-04-08' and hr=11 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@acid +POSTHOOK: Input: default@acid@ds=2008-04-08/hr=11 +#### A masked pattern was here #### +501 +PREHOOK: query: explain update acid set key = 'foo' where value = 'bar' and ds='2008-04-08' and hr>=11 +PREHOOK: type: QUERY +POSTHOOK: query: explain update acid set key = 'foo' where value = 'bar' and ds='2008-04-08' and hr>=11 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-2 depends on stages: Stage-1 + Stage-0 depends on stages: Stage-2 + Stage-3 depends on stages: Stage-0 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: acid + Filter Operator + predicate: (value = 'bar') (type: boolean) + Select Operator + expressions: ROW__ID (type: struct), 'foo' (type: string), value (type: string), ds (type: string), hr (type: int) + outputColumnNames: _col0, _col1, _col2, _col3, _col4 + Reduce Output Operator + key expressions: _col0 (type: struct) + sort order: + + value expressions: _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: int) + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: struct), VALUE._col0 (type: string), VALUE._col1 (type: string), VALUE._col2 (type: string), VALUE._col3 (type: int) + outputColumnNames: _col0, _col1, _col2, _col3, _col4 + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe + + Stage: Stage-2 + Map Reduce + Map Operator Tree: + TableScan + Reduce Output Operator + key expressions: _col3 (type: string), _col4 (type: int), '_bucket_number' (type: string), _col0 (type: struct) + sort order: ++++ + 
Map-reduce partition columns: _col3 (type: string), _col4 (type: int) + value expressions: _col0 (type: struct), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: int), '_bucket_number' (type: string) + Reduce Operator Tree: + Extract + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.acid + + Stage: Stage-0 + Move Operator + tables: + partition: + ds + hr + replace: false + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.acid + + Stage: Stage-3 + Stats-Aggr Operator + +PREHOOK: query: update acid set key = 'foo' where value = 'bar' and ds='2008-04-08' and hr>=11 +PREHOOK: type: QUERY +PREHOOK: Input: default@acid +PREHOOK: Input: default@acid@ds=2008-04-08/hr=11 +PREHOOK: Input: default@acid@ds=2008-04-08/hr=12 +PREHOOK: Output: default@acid@ds=2008-04-08/hr=11 +PREHOOK: Output: default@acid@ds=2008-04-08/hr=12 +POSTHOOK: query: update acid set key = 'foo' where value = 'bar' and ds='2008-04-08' and hr>=11 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@acid +POSTHOOK: Input: default@acid@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@acid@ds=2008-04-08/hr=12 +POSTHOOK: Output: default@acid@ds=2008-04-08/hr=11 +POSTHOOK: Output: default@acid@ds=2008-04-08/hr=12 +PREHOOK: query: select count(*) from acid where ds='2008-04-08' and hr>=11 +PREHOOK: type: QUERY +PREHOOK: Input: default@acid +PREHOOK: Input: default@acid@ds=2008-04-08/hr=11 +PREHOOK: Input: default@acid@ds=2008-04-08/hr=12 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from acid where ds='2008-04-08' and hr>=11 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@acid +POSTHOOK: Input: default@acid@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@acid@ds=2008-04-08/hr=12 +#### A masked pattern was here #### +1001 +PREHOOK: query: delete from acid where key = 'foo' and ds='2008-04-08' and hr=11 +PREHOOK: type: QUERY +PREHOOK: Input: default@acid +PREHOOK: Input: default@acid@ds=2008-04-08/hr=11 +PREHOOK: Output: default@acid@ds=2008-04-08/hr=11 +POSTHOOK: query: delete from acid where key = 'foo' and ds='2008-04-08' and hr=11 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@acid +POSTHOOK: Input: default@acid@ds=2008-04-08/hr=11 +POSTHOOK: Output: default@acid@ds=2008-04-08/hr=11 +PREHOOK: query: select count(*) from acid where ds='2008-04-08' and hr=11 +PREHOOK: type: QUERY +PREHOOK: Input: default@acid +PREHOOK: Input: default@acid@ds=2008-04-08/hr=11 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from acid where ds='2008-04-08' and hr=11 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@acid +POSTHOOK: Input: default@acid@ds=2008-04-08/hr=11 +#### A masked pattern was here #### +500 Index: ql/src/test/results/clientpositive/index_creation.q.out =================================================================== --- ql/src/test/results/clientpositive/index_creation.q.out (revision 1637277) +++ ql/src/test/results/clientpositive/index_creation.q.out (working copy) @@ -302,8 +302,14 @@ POSTHOOK: query: show tables POSTHOOK: type: SHOWTABLES alltypesorc +cbo_t1 +cbo_t2 +cbo_t3 +lineitem +part src src1 +src_cbo src_json src_sequencefile src_thrift Index: ql/src/test/results/clientpositive/input2.q.out 
=================================================================== --- ql/src/test/results/clientpositive/input2.q.out (revision 1637277) +++ ql/src/test/results/clientpositive/input2.q.out (working copy) @@ -44,8 +44,14 @@ POSTHOOK: query: SHOW TABLES POSTHOOK: type: SHOWTABLES alltypesorc +cbo_t1 +cbo_t2 +cbo_t3 +lineitem +part src src1 +src_cbo src_json src_sequencefile src_thrift @@ -67,8 +73,14 @@ POSTHOOK: query: SHOW TABLES POSTHOOK: type: SHOWTABLES alltypesorc +cbo_t1 +cbo_t2 +cbo_t3 +lineitem +part src src1 +src_cbo src_json src_sequencefile src_thrift Index: ql/src/test/results/clientpositive/input3.q.out =================================================================== --- ql/src/test/results/clientpositive/input3.q.out (revision 1637277) +++ ql/src/test/results/clientpositive/input3.q.out (working copy) @@ -36,8 +36,14 @@ POSTHOOK: query: SHOW TABLES POSTHOOK: type: SHOWTABLES alltypesorc +cbo_t1 +cbo_t2 +cbo_t3 +lineitem +part src src1 +src_cbo src_json src_sequencefile src_thrift @@ -122,8 +128,14 @@ POSTHOOK: query: SHOW TABLES POSTHOOK: type: SHOWTABLES alltypesorc +cbo_t1 +cbo_t2 +cbo_t3 +lineitem +part src src1 +src_cbo src_json src_sequencefile src_thrift Index: ql/src/test/results/clientpositive/input_part10_win.q.out =================================================================== --- ql/src/test/results/clientpositive/input_part10_win.q.out (revision 1637277) +++ ql/src/test/results/clientpositive/input_part10_win.q.out (working copy) @@ -9,6 +9,8 @@ ts STRING ) PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@part_special POSTHOOK: query: -- INCLUDE_OS_WINDOWS -- included only on windows because of difference in file name encoding logic @@ -20,6 +22,7 @@ ts STRING ) POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default POSTHOOK: Output: default@part_special PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE part_special PARTITION(ds='2008 04 08', ts = '10:11:12=455') @@ -29,9 +32,6 @@ INSERT OVERWRITE TABLE part_special PARTITION(ds='2008 04 08', ts = '10:11:12=455') SELECT 1, 2 FROM src LIMIT 1 POSTHOOK: type: QUERY -ABSTRACT SYNTAX TREE: - (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME part_special) (TOK_PARTSPEC (TOK_PARTVAL ds '2008 04 08') (TOK_PARTVAL ts '10:11:12=455')))) (TOK_SELECT (TOK_SELEXPR 1) (TOK_SELEXPR 2)) (TOK_LIMIT 1))) - STAGE DEPENDENCIES: Stage-1 is a root stage Stage-0 depends on stages: Stage-1 @@ -40,32 +40,32 @@ STAGE PLANS: Stage: Stage-1 Map Reduce - Alias -> Map Operator Tree: - src + Map Operator Tree: TableScan alias: src + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: - expr: 1 - type: int - expr: 2 - type: int + expressions: 1 (type: int), 2 (type: int) outputColumnNames: _col0, _col1 + Statistics: Num rows: 500 Data size: 4000 Basic stats: COMPLETE Column stats: COMPLETE Limit + Number of rows: 1 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator sort order: - tag: -1 - value expressions: - expr: _col0 - type: int - expr: _col1 - type: int + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col0 (type: int), _col1 (type: int) Reduce Operator Tree: - Extract + Select Operator + expressions: VALUE._col0 (type: int), VALUE._col1 (type: int) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: 
COMPLETE Limit + Number of rows: 1 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - GlobalTableId: 1 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -88,7 +88,6 @@ Stage: Stage-2 Stats-Aggr Operator - PREHOOK: query: INSERT OVERWRITE TABLE part_special PARTITION(ds='2008 04 08', ts = '10:11:12=455') SELECT 1, 2 FROM src LIMIT 1 PREHOOK: type: QUERY @@ -103,20 +102,20 @@ POSTHOOK: Lineage: part_special PARTITION(ds=2008 04 08,ts=10:11:12=455).b SIMPLE [] PREHOOK: query: DESCRIBE EXTENDED part_special PARTITION(ds='2008 04 08', ts = '10:11:12=455') PREHOOK: type: DESCTABLE +PREHOOK: Input: default@part_special POSTHOOK: query: DESCRIBE EXTENDED part_special PARTITION(ds='2008 04 08', ts = '10:11:12=455') POSTHOOK: type: DESCTABLE -POSTHOOK: Lineage: part_special PARTITION(ds=2008 04 08,ts=10:11:12=455).a SIMPLE [] -POSTHOOK: Lineage: part_special PARTITION(ds=2008 04 08,ts=10:11:12=455).b SIMPLE [] -a string None -b string None -ds string None -ts string None +POSTHOOK: Input: default@part_special +a string +b string +ds string +ts string # Partition Information # col_name data_type comment -ds string None -ts string None +ds string +ts string #### A masked pattern was here #### PREHOOK: query: SELECT * FROM part_special WHERE ds='2008 04 08' AND ts = '10:11:12=455' @@ -129,6 +128,4 @@ POSTHOOK: Input: default@part_special POSTHOOK: Input: default@part_special@ds=2008%2004%2008/ts=10%3A11%3A12%3D455 #### A masked pattern was here #### -POSTHOOK: Lineage: part_special PARTITION(ds=2008 04 08,ts=10:11:12=455).a SIMPLE [] -POSTHOOK: Lineage: part_special PARTITION(ds=2008 04 08,ts=10:11:12=455).b SIMPLE [] 1 2 2008 04 08 10:11:12=455 Index: ql/src/test/results/clientpositive/join_alt_syntax.q.out =================================================================== --- ql/src/test/results/clientpositive/join_alt_syntax.q.out (revision 1637277) +++ ql/src/test/results/clientpositive/join_alt_syntax.q.out (working copy) @@ -1,45 +1,3 @@ -PREHOOK: query: DROP TABLE part -PREHOOK: type: DROPTABLE -POSTHOOK: query: DROP TABLE part -POSTHOOK: type: DROPTABLE -PREHOOK: query: -- data setup -CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@part -POSTHOOK: query: -- data setup -CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@part -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part -PREHOOK: type: LOAD -#### A masked pattern was here #### -PREHOOK: Output: default@part -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part -POSTHOOK: type: LOAD -#### A masked pattern was here #### -POSTHOOK: Output: default@part Warning: Shuffle Join JOIN[4][tables = [p1, p2]] in Stage 'Stage-1:MAPRED' is a cross product PREHOOK: query: explain select p1.p_name, p2.p_name from part p1 , part p2 @@ -57,17 +15,17 @@ Map Operator Tree: 
TableScan alias: p2 - Statistics: Num rows: 31 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator sort order: - Statistics: Num rows: 31 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE value expressions: p_name (type: string) TableScan alias: p1 - Statistics: Num rows: 31 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator sort order: - Statistics: Num rows: 31 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE value expressions: p_name (type: string) Reduce Operator Tree: Join Operator @@ -77,14 +35,14 @@ 0 {VALUE._col1} 1 {VALUE._col1} outputColumnNames: _col1, _col13 - Statistics: Num rows: 34 Data size: 3490 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 28 Data size: 3461 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col1 (type: string), _col13 (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 34 Data size: 3490 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 28 Data size: 3461 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 34 Data size: 3490 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 28 Data size: 3461 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -114,37 +72,37 @@ Map Operator Tree: TableScan alias: p3 - Statistics: Num rows: 31 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: p_name is not null (type: boolean) - Statistics: Num rows: 16 Data size: 1637 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: p_name (type: string) sort order: + Map-reduce partition columns: p_name (type: string) - Statistics: Num rows: 16 Data size: 1637 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE TableScan alias: p2 - Statistics: Num rows: 31 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: p_name is not null (type: boolean) - Statistics: Num rows: 16 Data size: 1637 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: p_name (type: string) sort order: + Map-reduce partition columns: p_name (type: string) - Statistics: Num rows: 16 Data size: 1637 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE TableScan alias: p1 - Statistics: Num rows: 31 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: p_name is not null (type: boolean) - Statistics: Num rows: 16 Data size: 1637 Basic stats: COMPLETE 
Column stats: NONE + Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: p_name (type: string) sort order: + Map-reduce partition columns: p_name (type: string) - Statistics: Num rows: 16 Data size: 1637 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE Reduce Operator Tree: Join Operator condition map: @@ -155,17 +113,17 @@ 1 {KEY.reducesinkkey0} 2 {KEY.reducesinkkey0} outputColumnNames: _col1, _col13, _col25 - Statistics: Num rows: 35 Data size: 3601 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 28 Data size: 3460 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: ((_col1 = _col13) and (_col13 = _col25)) (type: boolean) - Statistics: Num rows: 8 Data size: 823 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 7 Data size: 865 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col1 (type: string), _col13 (type: string), _col25 (type: string) outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 8 Data size: 823 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 7 Data size: 865 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 8 Data size: 823 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 7 Data size: 865 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -195,41 +153,41 @@ Map Operator Tree: TableScan alias: part - Statistics: Num rows: 31 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: p_name is not null (type: boolean) - Statistics: Num rows: 16 Data size: 1637 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: p_name (type: string) outputColumnNames: _col0 - Statistics: Num rows: 16 Data size: 1637 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 16 Data size: 1637 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE TableScan alias: p3 - Statistics: Num rows: 31 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: p_name is not null (type: boolean) - Statistics: Num rows: 16 Data size: 1637 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: p_name (type: string) sort order: + Map-reduce partition columns: p_name (type: string) - Statistics: Num rows: 16 Data size: 1637 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE TableScan alias: p1 - Statistics: Num rows: 31 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: p_name is not 
null (type: boolean) - Statistics: Num rows: 16 Data size: 1637 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: p_name (type: string) sort order: + Map-reduce partition columns: p_name (type: string) - Statistics: Num rows: 16 Data size: 1637 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE Reduce Operator Tree: Join Operator condition map: @@ -240,17 +198,17 @@ 1 {KEY.reducesinkkey0} 2 {KEY.reducesinkkey0} outputColumnNames: _col1, _col12, _col14 - Statistics: Num rows: 35 Data size: 3601 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 28 Data size: 3460 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: ((_col1 = _col12) and (_col12 = _col14)) (type: boolean) - Statistics: Num rows: 8 Data size: 823 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 7 Data size: 865 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col1 (type: string), _col12 (type: string), _col14 (type: string) outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 8 Data size: 823 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 7 Data size: 865 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 8 Data size: 823 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 7 Data size: 865 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -282,20 +240,20 @@ Map Operator Tree: TableScan alias: p2 - Statistics: Num rows: 30 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: p_name is not null (type: boolean) - Statistics: Num rows: 15 Data size: 1586 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator sort order: - Statistics: Num rows: 15 Data size: 1586 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE value expressions: p_partkey (type: int), p_name (type: string) TableScan alias: p1 - Statistics: Num rows: 30 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator sort order: - Statistics: Num rows: 30 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE value expressions: p_partkey (type: int), p_name (type: string) Reduce Operator Tree: Join Operator @@ -305,10 +263,10 @@ 0 {VALUE._col0} {VALUE._col1} 1 {VALUE._col0} {VALUE._col1} outputColumnNames: _col0, _col1, _col12, _col13 - Statistics: Num rows: 33 Data size: 3490 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 28 Data size: 3461 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: (((_col12 + _col0) = _col0) and _col13 is not null) (type: boolean) - Statistics: Num rows: 8 Data size: 846 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 7 Data size: 865 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false table: @@ -321,21 +279,21 @@ Map Operator 
Tree: TableScan alias: p3 - Statistics: Num rows: 31 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: p_name is not null (type: boolean) - Statistics: Num rows: 16 Data size: 1637 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: p_name (type: string) sort order: + Map-reduce partition columns: p_name (type: string) - Statistics: Num rows: 16 Data size: 1637 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE TableScan Reduce Output Operator key expressions: _col13 (type: string) sort order: + Map-reduce partition columns: _col13 (type: string) - Statistics: Num rows: 8 Data size: 846 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 7 Data size: 865 Basic stats: COMPLETE Column stats: NONE value expressions: _col0 (type: int), _col1 (type: string), _col12 (type: int) Reduce Operator Tree: Join Operator @@ -345,17 +303,17 @@ 0 {VALUE._col0} {VALUE._col1} {VALUE._col12} {KEY.reducesinkkey0} 1 {KEY.reducesinkkey0} outputColumnNames: _col0, _col1, _col12, _col13, _col25 - Statistics: Num rows: 17 Data size: 1800 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: (((_col12 + _col0) = _col0) and (_col25 = _col13)) (type: boolean) - Statistics: Num rows: 4 Data size: 423 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 3 Data size: 370 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col1 (type: string), _col13 (type: string), _col25 (type: string) outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 4 Data size: 423 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 3 Data size: 370 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 4 Data size: 423 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 3 Data size: 370 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -389,26 +347,26 @@ Map Operator Tree: TableScan alias: p2 - Statistics: Num rows: 30 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: (p_name is not null and p_partkey is not null) (type: boolean) - Statistics: Num rows: 8 Data size: 846 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: p_name (type: string), p_partkey (type: int) sort order: ++ Map-reduce partition columns: p_name (type: string), p_partkey (type: int) - Statistics: Num rows: 8 Data size: 846 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE TableScan alias: p1 - Statistics: Num rows: 30 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: (p_name is not null and p_partkey is not null) (type: boolean) - Statistics: Num rows: 8 Data size: 846 Basic stats: COMPLETE Column stats: NONE + 
Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: p_name (type: string), p_partkey (type: int) sort order: ++ Map-reduce partition columns: p_name (type: string), p_partkey (type: int) - Statistics: Num rows: 8 Data size: 846 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE Reduce Operator Tree: Join Operator condition map: @@ -417,7 +375,7 @@ 0 {KEY.reducesinkkey1} {KEY.reducesinkkey0} 1 {KEY.reducesinkkey1} {KEY.reducesinkkey0} outputColumnNames: _col0, _col1, _col12, _col13 - Statistics: Num rows: 8 Data size: 930 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 7 Data size: 931 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false table: @@ -430,21 +388,21 @@ Map Operator Tree: TableScan alias: p3 - Statistics: Num rows: 31 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: p_name is not null (type: boolean) - Statistics: Num rows: 16 Data size: 1637 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: p_name (type: string) sort order: + Map-reduce partition columns: p_name (type: string) - Statistics: Num rows: 16 Data size: 1637 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE TableScan Reduce Output Operator key expressions: _col13 (type: string) sort order: + Map-reduce partition columns: _col13 (type: string) - Statistics: Num rows: 8 Data size: 930 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 7 Data size: 931 Basic stats: COMPLETE Column stats: NONE value expressions: _col0 (type: int), _col1 (type: string), _col12 (type: int) Reduce Operator Tree: Join Operator @@ -454,7 +412,7 @@ 0 {VALUE._col0} {VALUE._col1} {VALUE._col12} {KEY.reducesinkkey0} 1 {KEY.reducesinkkey0} outputColumnNames: _col0, _col1, _col12, _col13, _col25 - Statistics: Num rows: 17 Data size: 1800 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false table: @@ -467,22 +425,22 @@ Map Operator Tree: TableScan alias: p4 - Statistics: Num rows: 30 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: p_partkey is not null (type: boolean) - Statistics: Num rows: 15 Data size: 1586 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: p_partkey (type: int) sort order: + Map-reduce partition columns: p_partkey (type: int) - Statistics: Num rows: 15 Data size: 1586 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE value expressions: p_name (type: string) TableScan Reduce Output Operator key expressions: _col0 (type: int) sort order: + Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 17 Data size: 1800 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE value expressions: _col1 (type: string), _col12 (type: int), _col13 
(type: string), _col25 (type: string) Reduce Operator Tree: Join Operator @@ -492,17 +450,17 @@ 0 {KEY.reducesinkkey0} {VALUE._col0} {VALUE._col11} {VALUE._col12} {VALUE._col24} 1 {KEY.reducesinkkey0} {VALUE._col0} outputColumnNames: _col0, _col1, _col12, _col13, _col25, _col36, _col37 - Statistics: Num rows: 18 Data size: 1980 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 15 Data size: 1903 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: (((_col13 = _col25) and (_col0 = _col36)) and (_col0 = _col12)) (type: boolean) - Statistics: Num rows: 2 Data size: 220 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 126 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col1 (type: string), _col13 (type: string), _col25 (type: string), _col37 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 2 Data size: 220 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 126 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 2 Data size: 220 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 126 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -536,26 +494,26 @@ Map Operator Tree: TableScan alias: p2 - Statistics: Num rows: 30 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: (p_name is not null and p_partkey is not null) (type: boolean) - Statistics: Num rows: 8 Data size: 846 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: p_name (type: string), p_partkey (type: int) sort order: ++ Map-reduce partition columns: p_name (type: string), p_partkey (type: int) - Statistics: Num rows: 8 Data size: 846 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE TableScan alias: p1 - Statistics: Num rows: 30 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: (p_name is not null and p_partkey is not null) (type: boolean) - Statistics: Num rows: 8 Data size: 846 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: p_name (type: string), p_partkey (type: int) sort order: ++ Map-reduce partition columns: p_name (type: string), p_partkey (type: int) - Statistics: Num rows: 8 Data size: 846 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE Reduce Operator Tree: Join Operator condition map: @@ -564,7 +522,7 @@ 0 {KEY.reducesinkkey1} {KEY.reducesinkkey0} 1 {KEY.reducesinkkey1} {KEY.reducesinkkey0} outputColumnNames: _col0, _col1, _col12, _col13 - Statistics: Num rows: 8 Data size: 930 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 7 Data size: 931 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false table: @@ -577,21 +535,21 @@ Map Operator Tree: TableScan alias: p3 - Statistics: Num rows: 31 Data size: 3173 Basic stats: COMPLETE Column stats: 
NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: p_name is not null (type: boolean) - Statistics: Num rows: 16 Data size: 1637 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: p_name (type: string) sort order: + Map-reduce partition columns: p_name (type: string) - Statistics: Num rows: 16 Data size: 1637 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE TableScan Reduce Output Operator key expressions: _col13 (type: string) sort order: + Map-reduce partition columns: _col13 (type: string) - Statistics: Num rows: 8 Data size: 930 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 7 Data size: 931 Basic stats: COMPLETE Column stats: NONE value expressions: _col0 (type: int), _col1 (type: string), _col12 (type: int) Reduce Operator Tree: Join Operator @@ -601,7 +559,7 @@ 0 {VALUE._col0} {VALUE._col1} {VALUE._col12} {KEY.reducesinkkey0} 1 {KEY.reducesinkkey0} outputColumnNames: _col0, _col1, _col12, _col13, _col25 - Statistics: Num rows: 17 Data size: 1800 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false table: @@ -614,22 +572,22 @@ Map Operator Tree: TableScan alias: p4 - Statistics: Num rows: 30 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: p_partkey is not null (type: boolean) - Statistics: Num rows: 15 Data size: 1586 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: p_partkey (type: int) sort order: + Map-reduce partition columns: p_partkey (type: int) - Statistics: Num rows: 15 Data size: 1586 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE value expressions: p_name (type: string) TableScan Reduce Output Operator key expressions: _col0 (type: int) sort order: + Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 17 Data size: 1800 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE value expressions: _col1 (type: string), _col12 (type: int), _col13 (type: string), _col25 (type: string) Reduce Operator Tree: Join Operator @@ -639,17 +597,17 @@ 0 {KEY.reducesinkkey0} {VALUE._col0} {VALUE._col11} {VALUE._col12} {VALUE._col24} 1 {KEY.reducesinkkey0} {VALUE._col0} outputColumnNames: _col0, _col1, _col12, _col13, _col25, _col36, _col37 - Statistics: Num rows: 18 Data size: 1980 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 15 Data size: 1903 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: (((_col13 = _col25) and (_col0 = _col36)) and (_col0 = _col12)) (type: boolean) - Statistics: Num rows: 2 Data size: 220 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 126 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col1 (type: string), _col13 (type: string), _col25 (type: string), _col37 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 2 Data size: 220 Basic stats: COMPLETE Column stats: NONE + 
Statistics: Num rows: 1 Data size: 126 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 2 Data size: 220 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 126 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat Index: ql/src/test/results/clientpositive/join_cond_pushdown_1.q.out =================================================================== --- ql/src/test/results/clientpositive/join_cond_pushdown_1.q.out (revision 1637277) +++ ql/src/test/results/clientpositive/join_cond_pushdown_1.q.out (working copy) @@ -1,45 +1,3 @@ -PREHOOK: query: DROP TABLE part -PREHOOK: type: DROPTABLE -POSTHOOK: query: DROP TABLE part -POSTHOOK: type: DROPTABLE -PREHOOK: query: -- data setup -CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@part -POSTHOOK: query: -- data setup -CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@part -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part -PREHOOK: type: LOAD -#### A masked pattern was here #### -PREHOOK: Output: default@part -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part -POSTHOOK: type: LOAD -#### A masked pattern was here #### -POSTHOOK: Output: default@part PREHOOK: query: explain select * from part p1 join part p2 join part p3 on p1.p_name = p2.p_name and p2.p_name = p3.p_name PREHOOK: type: QUERY @@ -56,39 +14,39 @@ Map Operator Tree: TableScan alias: p3 - Statistics: Num rows: 5 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: p_name is not null (type: boolean) - Statistics: Num rows: 3 Data size: 1903 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: p_name (type: string) sort order: + Map-reduce partition columns: p_name (type: string) - Statistics: Num rows: 3 Data size: 1903 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE value expressions: p_partkey (type: int), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string) TableScan alias: p2 - Statistics: Num rows: 5 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: p_name is not null (type: boolean) - Statistics: Num rows: 3 Data size: 1903 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: p_name (type: string) sort order: + Map-reduce partition columns: p_name (type: string) - Statistics: Num rows: 3 Data 
size: 1903 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE value expressions: p_partkey (type: int), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string) TableScan alias: p1 - Statistics: Num rows: 5 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: p_name is not null (type: boolean) - Statistics: Num rows: 3 Data size: 1903 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: p_name (type: string) sort order: + Map-reduce partition columns: p_name (type: string) - Statistics: Num rows: 3 Data size: 1903 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE value expressions: p_partkey (type: int), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string) Reduce Operator Tree: Join Operator @@ -100,14 +58,14 @@ 1 {VALUE._col0} {KEY.reducesinkkey0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} 2 {VALUE._col0} {KEY.reducesinkkey0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32 - Statistics: Num rows: 6 Data size: 4186 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 28 Data size: 3460 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col12 (type: int), _col13 (type: string), _col14 (type: string), _col15 (type: string), _col16 (type: string), _col17 (type: int), _col18 (type: string), _col19 (type: double), _col20 (type: string), _col24 (type: int), _col25 (type: string), _col26 (type: string), _col27 (type: string), _col28 (type: string), _col29 (type: int), _col30 (type: string), _col31 (type: double), _col32 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26 - Statistics: Num rows: 6 Data size: 4186 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 28 Data size: 3460 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 6 Data size: 4186 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 28 Data size: 3460 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -135,39 +93,39 @@ Map Operator Tree: TableScan alias: p3 - Statistics: Num rows: 5 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: 
COMPLETE Column stats: NONE Filter Operator predicate: p_name is not null (type: boolean) - Statistics: Num rows: 3 Data size: 1903 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: p_name (type: string) sort order: + Map-reduce partition columns: p_name (type: string) - Statistics: Num rows: 3 Data size: 1903 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE value expressions: p_partkey (type: int), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string) TableScan alias: p2 - Statistics: Num rows: 5 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: p_name is not null (type: boolean) - Statistics: Num rows: 3 Data size: 1903 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: p_name (type: string) sort order: + Map-reduce partition columns: p_name (type: string) - Statistics: Num rows: 3 Data size: 1903 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE value expressions: p_partkey (type: int), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string) TableScan alias: p1 - Statistics: Num rows: 5 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: p_name is not null (type: boolean) - Statistics: Num rows: 3 Data size: 1903 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: p_name (type: string) sort order: + Map-reduce partition columns: p_name (type: string) - Statistics: Num rows: 3 Data size: 1903 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE value expressions: p_partkey (type: int), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string) Reduce Operator Tree: Join Operator @@ -179,14 +137,14 @@ 1 {VALUE._col0} {KEY.reducesinkkey0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} 2 {VALUE._col0} {KEY.reducesinkkey0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32 - Statistics: Num rows: 6 Data size: 4186 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 28 Data size: 3460 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: 
string), _col12 (type: int), _col13 (type: string), _col14 (type: string), _col15 (type: string), _col16 (type: string), _col17 (type: int), _col18 (type: string), _col19 (type: double), _col20 (type: string), _col24 (type: int), _col25 (type: string), _col26 (type: string), _col27 (type: string), _col28 (type: string), _col29 (type: int), _col30 (type: string), _col31 (type: double), _col32 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26 - Statistics: Num rows: 6 Data size: 4186 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 28 Data size: 3460 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 6 Data size: 4186 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 28 Data size: 3460 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -216,20 +174,20 @@ Map Operator Tree: TableScan alias: p2 - Statistics: Num rows: 5 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: p_name is not null (type: boolean) - Statistics: Num rows: 3 Data size: 1903 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator sort order: - Statistics: Num rows: 3 Data size: 1903 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE value expressions: p_partkey (type: int), p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string) TableScan alias: p1 - Statistics: Num rows: 5 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator sort order: - Statistics: Num rows: 5 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE value expressions: p_partkey (type: int), p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string) Reduce Operator Tree: Join Operator @@ -239,10 +197,10 @@ 0 {VALUE._col0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} {VALUE._col8} 1 {VALUE._col0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} {VALUE._col8} outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20 - Statistics: Num rows: 5 Data size: 3490 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 28 Data size: 3461 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: (((_col12 + _col0) = _col0) and _col13 is not null) (type: boolean) - Statistics: Num rows: 1 Data size: 698 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 7 Data size: 865 Basic stats: COMPLETE Column stats: 
NONE File Output Operator compressed: false table: @@ -255,22 +213,22 @@ Map Operator Tree: TableScan alias: p3 - Statistics: Num rows: 5 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: p_name is not null (type: boolean) - Statistics: Num rows: 3 Data size: 1903 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: p_name (type: string) sort order: + Map-reduce partition columns: p_name (type: string) - Statistics: Num rows: 3 Data size: 1903 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE value expressions: p_partkey (type: int), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string) TableScan Reduce Output Operator key expressions: _col13 (type: string) sort order: + Map-reduce partition columns: _col13 (type: string) - Statistics: Num rows: 1 Data size: 698 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 7 Data size: 865 Basic stats: COMPLETE Column stats: NONE value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col12 (type: int), _col14 (type: string), _col15 (type: string), _col16 (type: string), _col17 (type: int), _col18 (type: string), _col19 (type: double), _col20 (type: string) Reduce Operator Tree: Join Operator @@ -280,14 +238,14 @@ 0 {VALUE._col0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} {VALUE._col8} {VALUE._col12} {KEY.reducesinkkey0} {VALUE._col13} {VALUE._col14} {VALUE._col15} {VALUE._col16} {VALUE._col17} {VALUE._col18} {VALUE._col19} 1 {VALUE._col0} {KEY.reducesinkkey0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32 - Statistics: Num rows: 3 Data size: 2093 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col12 (type: int), _col13 (type: string), _col14 (type: string), _col15 (type: string), _col16 (type: string), _col17 (type: int), _col18 (type: string), _col19 (type: double), _col20 (type: string), _col24 (type: int), _col25 (type: string), _col26 (type: string), _col27 (type: string), _col28 (type: string), _col29 (type: int), _col30 (type: string), _col31 (type: double), _col32 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26 - Statistics: Num rows: 3 Data size: 2093 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 14 Data size: 1730 Basic stats: 
COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 3 Data size: 2093 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -317,20 +275,20 @@ Map Operator Tree: TableScan alias: p2 - Statistics: Num rows: 5 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: ((p_partkey = 1) and p_name is not null) (type: boolean) - Statistics: Num rows: 1 Data size: 634 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator sort order: - Statistics: Num rows: 1 Data size: 634 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE value expressions: p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string) TableScan alias: p1 - Statistics: Num rows: 5 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator sort order: - Statistics: Num rows: 5 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE value expressions: p_partkey (type: int), p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string) Reduce Operator Tree: Join Operator @@ -340,7 +298,7 @@ 0 {VALUE._col0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} {VALUE._col8} 1 {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} {VALUE._col8} outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20 - Statistics: Num rows: 5 Data size: 3490 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 28 Data size: 3461 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false table: @@ -353,22 +311,22 @@ Map Operator Tree: TableScan alias: p3 - Statistics: Num rows: 5 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: p_name is not null (type: boolean) - Statistics: Num rows: 3 Data size: 1903 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: p_name (type: string) sort order: + Map-reduce partition columns: p_name (type: string) - Statistics: Num rows: 3 Data size: 1903 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE value expressions: p_partkey (type: int), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string) TableScan Reduce Output Operator key 
expressions: _col13 (type: string) sort order: + Map-reduce partition columns: _col13 (type: string) - Statistics: Num rows: 5 Data size: 3490 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 28 Data size: 3461 Basic stats: COMPLETE Column stats: NONE value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col14 (type: string), _col15 (type: string), _col16 (type: string), _col17 (type: int), _col18 (type: string), _col19 (type: double), _col20 (type: string) Reduce Operator Tree: Join Operator @@ -378,14 +336,14 @@ 0 {VALUE._col0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} {VALUE._col8} {KEY.reducesinkkey0} {VALUE._col13} {VALUE._col14} {VALUE._col15} {VALUE._col16} {VALUE._col17} {VALUE._col18} {VALUE._col19} 1 {VALUE._col0} {KEY.reducesinkkey0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32 - Statistics: Num rows: 5 Data size: 3839 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 30 Data size: 3807 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), 1 (type: int), _col13 (type: string), _col14 (type: string), _col15 (type: string), _col16 (type: string), _col17 (type: int), _col18 (type: string), _col19 (type: double), _col20 (type: string), _col24 (type: int), _col25 (type: string), _col26 (type: string), _col27 (type: string), _col28 (type: string), _col29 (type: int), _col30 (type: string), _col31 (type: double), _col32 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26 - Statistics: Num rows: 5 Data size: 3839 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 30 Data size: 3807 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 5 Data size: 3839 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 30 Data size: 3807 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat Index: ql/src/test/results/clientpositive/join_cond_pushdown_2.q.out =================================================================== --- ql/src/test/results/clientpositive/join_cond_pushdown_2.q.out (revision 1637277) +++ ql/src/test/results/clientpositive/join_cond_pushdown_2.q.out (working copy) @@ -1,45 +1,3 @@ -PREHOOK: query: DROP TABLE part -PREHOOK: type: DROPTABLE -POSTHOOK: query: DROP TABLE part -POSTHOOK: type: DROPTABLE -PREHOOK: query: -- data setup -CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: 
Output: default@part -POSTHOOK: query: -- data setup -CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@part -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part -PREHOOK: type: LOAD -#### A masked pattern was here #### -PREHOOK: Output: default@part -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part -POSTHOOK: type: LOAD -#### A masked pattern was here #### -POSTHOOK: Output: default@part PREHOOK: query: explain select * from part p1 join part p2 join part p3 on p1.p_name = p2.p_name join part p4 on p2.p_name = p3.p_name and p1.p_name = p4.p_name PREHOOK: type: QUERY @@ -56,51 +14,51 @@ Map Operator Tree: TableScan alias: p4 - Statistics: Num rows: 5 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: p_name is not null (type: boolean) - Statistics: Num rows: 3 Data size: 1903 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: p_name (type: string) sort order: + Map-reduce partition columns: p_name (type: string) - Statistics: Num rows: 3 Data size: 1903 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE value expressions: p_partkey (type: int), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string) TableScan alias: p3 - Statistics: Num rows: 5 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: p_name is not null (type: boolean) - Statistics: Num rows: 3 Data size: 1903 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: p_name (type: string) sort order: + Map-reduce partition columns: p_name (type: string) - Statistics: Num rows: 3 Data size: 1903 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE value expressions: p_partkey (type: int), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string) TableScan alias: p2 - Statistics: Num rows: 5 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: p_name is not null (type: boolean) - Statistics: Num rows: 3 Data size: 1903 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: p_name (type: string) sort order: + Map-reduce partition columns: p_name (type: string) - Statistics: Num rows: 3 Data size: 1903 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE value expressions: p_partkey (type: int), p_mfgr 
(type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string) TableScan alias: p1 - Statistics: Num rows: 5 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: p_name is not null (type: boolean) - Statistics: Num rows: 3 Data size: 1903 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: p_name (type: string) sort order: + Map-reduce partition columns: p_name (type: string) - Statistics: Num rows: 3 Data size: 1903 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE value expressions: p_partkey (type: int), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string) Reduce Operator Tree: Join Operator @@ -114,14 +72,14 @@ 2 {VALUE._col0} {KEY.reducesinkkey0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} 3 {VALUE._col0} {KEY.reducesinkkey0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32, _col36, _col37, _col38, _col39, _col40, _col41, _col42, _col43, _col44 - Statistics: Num rows: 9 Data size: 6279 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 42 Data size: 5190 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col12 (type: int), _col13 (type: string), _col14 (type: string), _col15 (type: string), _col16 (type: string), _col17 (type: int), _col18 (type: string), _col19 (type: double), _col20 (type: string), _col24 (type: int), _col25 (type: string), _col26 (type: string), _col27 (type: string), _col28 (type: string), _col29 (type: int), _col30 (type: string), _col31 (type: double), _col32 (type: string), _col36 (type: int), _col37 (type: string), _col38 (type: string), _col39 (type: string), _col40 (type: string), _col41 (type: int), _col42 (type: string), _col43 (type: double), _col44 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32, _col33, _col34, _col35 - Statistics: Num rows: 9 Data size: 6279 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 42 Data size: 5190 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 9 Data size: 6279 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 42 Data size: 5190 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -153,27 +111,27 @@ Map Operator Tree: 
TableScan alias: p2 - Statistics: Num rows: 5 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: (p_name is not null and p_partkey is not null) (type: boolean) - Statistics: Num rows: 2 Data size: 1269 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: p_name (type: string), p_partkey (type: int) sort order: ++ Map-reduce partition columns: p_name (type: string), p_partkey (type: int) - Statistics: Num rows: 2 Data size: 1269 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE value expressions: p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string) TableScan alias: p1 - Statistics: Num rows: 5 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: (p_name is not null and p_partkey is not null) (type: boolean) - Statistics: Num rows: 2 Data size: 1269 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: p_name (type: string), p_partkey (type: int) sort order: ++ Map-reduce partition columns: p_name (type: string), p_partkey (type: int) - Statistics: Num rows: 2 Data size: 1269 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE value expressions: p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string) Reduce Operator Tree: Join Operator @@ -183,7 +141,7 @@ 0 {KEY.reducesinkkey1} {KEY.reducesinkkey0} {VALUE._col0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} 1 {KEY.reducesinkkey1} {KEY.reducesinkkey0} {VALUE._col0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20 - Statistics: Num rows: 2 Data size: 1395 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 7 Data size: 931 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false table: @@ -196,22 +154,22 @@ Map Operator Tree: TableScan alias: p3 - Statistics: Num rows: 5 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: p_name is not null (type: boolean) - Statistics: Num rows: 3 Data size: 1903 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: p_name (type: string) sort order: + Map-reduce partition columns: p_name (type: string) - Statistics: Num rows: 3 Data size: 1903 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE value expressions: p_partkey (type: int), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: 
string), p_retailprice (type: double), p_comment (type: string) TableScan Reduce Output Operator key expressions: _col13 (type: string) sort order: + Map-reduce partition columns: _col13 (type: string) - Statistics: Num rows: 2 Data size: 1395 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 7 Data size: 931 Basic stats: COMPLETE Column stats: NONE value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col12 (type: int), _col14 (type: string), _col15 (type: string), _col16 (type: string), _col17 (type: int), _col18 (type: string), _col19 (type: double), _col20 (type: string) Reduce Operator Tree: Join Operator @@ -221,7 +179,7 @@ 0 {VALUE._col0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} {VALUE._col8} {VALUE._col12} {KEY.reducesinkkey0} {VALUE._col13} {VALUE._col14} {VALUE._col15} {VALUE._col16} {VALUE._col17} {VALUE._col18} {VALUE._col19} 1 {VALUE._col0} {KEY.reducesinkkey0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32 - Statistics: Num rows: 3 Data size: 2093 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false table: @@ -234,22 +192,22 @@ Map Operator Tree: TableScan alias: p4 - Statistics: Num rows: 5 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: p_partkey is not null (type: boolean) - Statistics: Num rows: 3 Data size: 1903 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: p_partkey (type: int) sort order: + Map-reduce partition columns: p_partkey (type: int) - Statistics: Num rows: 3 Data size: 1903 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE value expressions: p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string) TableScan Reduce Output Operator key expressions: _col0 (type: int) sort order: + Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 3 Data size: 2093 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE value expressions: _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col12 (type: int), _col13 (type: string), _col14 (type: string), _col15 (type: string), _col16 (type: string), _col17 (type: int), _col18 (type: string), _col19 (type: double), _col20 (type: string), _col24 (type: int), _col25 (type: string), _col26 (type: string), _col27 (type: string), _col28 (type: string), _col29 (type: int), _col30 (type: string), _col31 (type: double), _col32 (type: string) Reduce Operator Tree: Join Operator @@ 
-259,14 +217,14 @@ 0 {KEY.reducesinkkey0} {VALUE._col0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} {VALUE._col11} {VALUE._col12} {VALUE._col13} {VALUE._col14} {VALUE._col15} {VALUE._col16} {VALUE._col17} {VALUE._col18} {VALUE._col19} {VALUE._col23} {VALUE._col24} {VALUE._col25} {VALUE._col26} {VALUE._col27} {VALUE._col28} {VALUE._col29} {VALUE._col30} {VALUE._col31} 1 {KEY.reducesinkkey0} {VALUE._col0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32, _col36, _col37, _col38, _col39, _col40, _col41, _col42, _col43, _col44 - Statistics: Num rows: 3 Data size: 2302 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 15 Data size: 1903 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col12 (type: int), _col13 (type: string), _col14 (type: string), _col15 (type: string), _col16 (type: string), _col17 (type: int), _col18 (type: string), _col19 (type: double), _col20 (type: string), _col24 (type: int), _col25 (type: string), _col26 (type: string), _col27 (type: string), _col28 (type: string), _col29 (type: int), _col30 (type: string), _col31 (type: double), _col32 (type: string), _col36 (type: int), _col37 (type: string), _col38 (type: string), _col39 (type: string), _col40 (type: string), _col41 (type: int), _col42 (type: string), _col43 (type: double), _col44 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32, _col33, _col34, _col35 - Statistics: Num rows: 3 Data size: 2302 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 15 Data size: 1903 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 3 Data size: 2302 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 15 Data size: 1903 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat Index: ql/src/test/results/clientpositive/join_cond_pushdown_3.q.out =================================================================== --- ql/src/test/results/clientpositive/join_cond_pushdown_3.q.out (revision 1637277) +++ ql/src/test/results/clientpositive/join_cond_pushdown_3.q.out (working copy) @@ -1,45 +1,3 @@ -PREHOOK: query: DROP TABLE part -PREHOOK: type: DROPTABLE -POSTHOOK: query: DROP TABLE part -POSTHOOK: type: DROPTABLE -PREHOOK: query: -- data setup -CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@part -POSTHOOK: query: -- data setup -CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size 
INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@part -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part -PREHOOK: type: LOAD -#### A masked pattern was here #### -PREHOOK: Output: default@part -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part -POSTHOOK: type: LOAD -#### A masked pattern was here #### -POSTHOOK: Output: default@part PREHOOK: query: explain select * from part p1 join part p2 join part p3 where p1.p_name = p2.p_name and p2.p_name = p3.p_name @@ -58,39 +16,39 @@ Map Operator Tree: TableScan alias: p3 - Statistics: Num rows: 5 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: p_name is not null (type: boolean) - Statistics: Num rows: 3 Data size: 1903 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: p_name (type: string) sort order: + Map-reduce partition columns: p_name (type: string) - Statistics: Num rows: 3 Data size: 1903 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE value expressions: p_partkey (type: int), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string) TableScan alias: p2 - Statistics: Num rows: 5 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: p_name is not null (type: boolean) - Statistics: Num rows: 3 Data size: 1903 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: p_name (type: string) sort order: + Map-reduce partition columns: p_name (type: string) - Statistics: Num rows: 3 Data size: 1903 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE value expressions: p_partkey (type: int), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string) TableScan alias: p1 - Statistics: Num rows: 5 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: p_name is not null (type: boolean) - Statistics: Num rows: 3 Data size: 1903 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: p_name (type: string) sort order: + Map-reduce partition columns: p_name (type: string) - Statistics: Num rows: 3 Data size: 1903 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE value expressions: p_partkey (type: int), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string) Reduce Operator Tree: Join Operator @@ -102,17 +60,17 @@ 1 
             1 {VALUE._col0} {KEY.reducesinkkey0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7}
             2 {VALUE._col0} {KEY.reducesinkkey0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7}
           outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32
-          Statistics: Num rows: 6 Data size: 4186 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 28 Data size: 3460 Basic stats: COMPLETE Column stats: NONE
          Filter Operator
            predicate: ((_col1 = _col13) and (_col13 = _col25)) (type: boolean)
-            Statistics: Num rows: 1 Data size: 697 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 7 Data size: 865 Basic stats: COMPLETE Column stats: NONE
            Select Operator
              expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col12 (type: int), _col13 (type: string), _col14 (type: string), _col15 (type: string), _col16 (type: string), _col17 (type: int), _col18 (type: string), _col19 (type: double), _col20 (type: string), _col24 (type: int), _col25 (type: string), _col26 (type: string), _col27 (type: string), _col28 (type: string), _col29 (type: int), _col30 (type: string), _col31 (type: double), _col32 (type: string)
              outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26
-              Statistics: Num rows: 1 Data size: 697 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 7 Data size: 865 Basic stats: COMPLETE Column stats: NONE
              File Output Operator
                compressed: false
-                Statistics: Num rows: 1 Data size: 697 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 7 Data size: 865 Basic stats: COMPLETE Column stats: NONE
                table:
                    input format: org.apache.hadoop.mapred.TextInputFormat
                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -142,39 +100,39 @@
       Map Operator Tree:
           TableScan
             alias: p3
-            Statistics: Num rows: 5 Data size: 3173 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
               predicate: p_name is not null (type: boolean)
-              Statistics: Num rows: 3 Data size: 1903 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
              Reduce Output Operator
                key expressions: p_name (type: string)
                sort order: +
                Map-reduce partition columns: p_name (type: string)
-                Statistics: Num rows: 3 Data size: 1903 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
                value expressions: p_partkey (type: int), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string)
           TableScan
             alias: p2
-            Statistics: Num rows: 5 Data size: 3173 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
               predicate: p_name is not null (type: boolean)
-              Statistics: Num rows: 3 Data size: 1903 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
              Reduce Output Operator
                key expressions: p_name (type: string)
                sort order: +
                Map-reduce partition columns: p_name (type: string)
-                Statistics: Num rows: 3 Data size: 1903 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
                value expressions: p_partkey (type: int), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string)
           TableScan
             alias: p1
-            Statistics: Num rows: 5 Data size: 3173 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
               predicate: p_name is not null (type: boolean)
-              Statistics: Num rows: 3 Data size: 1903 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
              Reduce Output Operator
                key expressions: p_name (type: string)
                sort order: +
                Map-reduce partition columns: p_name (type: string)
-                Statistics: Num rows: 3 Data size: 1903 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
                value expressions: p_partkey (type: int), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string)
       Reduce Operator Tree:
         Join Operator
@@ -186,17 +144,17 @@
             1 {VALUE._col0} {KEY.reducesinkkey0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7}
             2 {VALUE._col0} {KEY.reducesinkkey0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7}
           outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32
-          Statistics: Num rows: 6 Data size: 4186 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 28 Data size: 3460 Basic stats: COMPLETE Column stats: NONE
          Filter Operator
            predicate: ((_col13 = _col1) and (_col25 = _col13)) (type: boolean)
-            Statistics: Num rows: 1 Data size: 697 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 7 Data size: 865 Basic stats: COMPLETE Column stats: NONE
            Select Operator
              expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col12 (type: int), _col13 (type: string), _col14 (type: string), _col15 (type: string), _col16 (type: string), _col17 (type: int), _col18 (type: string), _col19 (type: double), _col20 (type: string), _col24 (type: int), _col25 (type: string), _col26 (type: string), _col27 (type: string), _col28 (type: string), _col29 (type: int), _col30 (type: string), _col31 (type: double), _col32 (type: string)
              outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26
-              Statistics: Num rows: 1 Data size: 697 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 7 Data size: 865 Basic stats: COMPLETE Column stats: NONE
              File Output Operator
                compressed: false
-                Statistics: Num rows: 1 Data size: 697 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 7 Data size: 865 Basic stats: COMPLETE Column stats: NONE
                table:
                    input format: org.apache.hadoop.mapred.TextInputFormat
                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -228,20 +186,20 @@
       Map Operator Tree:
           TableScan
             alias: p2
-            Statistics: Num rows: 5 Data size: 3173 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
               predicate: p_name is not null (type: boolean)
-              Statistics: Num rows: 3 Data size: 1903 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
              Reduce Output Operator
                sort order: 
-                Statistics: Num rows: 3 Data size: 1903 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
                value expressions: p_partkey (type: int), p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string)
           TableScan
             alias: p1
-            Statistics: Num rows: 5 Data size: 3173 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
            Reduce Output Operator
              sort order: 
-              Statistics: Num rows: 5 Data size: 3173 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
              value expressions: p_partkey (type: int), p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string)
       Reduce Operator Tree:
         Join Operator
@@ -251,10 +209,10 @@
             0 {VALUE._col0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} {VALUE._col8}
             1 {VALUE._col0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} {VALUE._col8}
           outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20
-          Statistics: Num rows: 5 Data size: 3490 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 28 Data size: 3461 Basic stats: COMPLETE Column stats: NONE
          Filter Operator
            predicate: (((_col12 + _col0) = _col0) and _col13 is not null) (type: boolean)
-            Statistics: Num rows: 1 Data size: 698 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 7 Data size: 865 Basic stats: COMPLETE Column stats: NONE
            File Output Operator
              compressed: false
              table:
@@ -267,22 +225,22 @@
       Map Operator Tree:
           TableScan
             alias: p3
-            Statistics: Num rows: 5 Data size: 3173 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
               predicate: p_name is not null (type: boolean)
-              Statistics: Num rows: 3 Data size: 1903 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
              Reduce Output Operator
                key expressions: p_name (type: string)
                sort order: +
                Map-reduce partition columns: p_name (type: string)
-                Statistics: Num rows: 3 Data size: 1903 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
                value expressions: p_partkey (type: int), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string)
           TableScan
            Reduce Output Operator
              key expressions: _col13 (type: string)
              sort order: +
              Map-reduce partition columns: _col13 (type: string)
-              Statistics: Num rows: 1 Data size: 698 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 7 Data size: 865 Basic stats: COMPLETE Column stats: NONE
              value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col12 (type: int), _col14 (type: string), _col15 (type: string), _col16 (type: string), _col17 (type: int), _col18 (type: string), _col19 (type: double), _col20 (type: string)
       Reduce Operator Tree:
         Join Operator
@@ -292,17 +250,17 @@
             0 {VALUE._col0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} {VALUE._col8} {VALUE._col12} {KEY.reducesinkkey0} {VALUE._col13} {VALUE._col14} {VALUE._col15} {VALUE._col16} {VALUE._col17} {VALUE._col18} {VALUE._col19}
             1 {VALUE._col0} {KEY.reducesinkkey0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7}
           outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32
-          Statistics: Num rows: 3 Data size: 2093 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE
          Filter Operator
            predicate: (((_col12 + _col0) = _col0) and (_col25 = _col13)) (type: boolean)
-            Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+            Statistics: Num rows: 3 Data size: 370 Basic stats: COMPLETE Column stats: NONE
            Select Operator
              expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col12 (type: int), _col13 (type: string), _col14 (type: string), _col15 (type: string), _col16 (type: string), _col17 (type: int), _col18 (type: string), _col19 (type: double), _col20 (type: string), _col24 (type: int), _col25 (type: string), _col26 (type: string), _col27 (type: string), _col28 (type: string), _col29 (type: int), _col30 (type: string), _col31 (type: double), _col32 (type: string)
              outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26
-              Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+              Statistics: Num rows: 3 Data size: 370 Basic stats: COMPLETE Column stats: NONE
              File Output Operator
                compressed: false
-                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                Statistics: Num rows: 3 Data size: 370 Basic stats: COMPLETE Column stats: NONE
                table:
                    input format: org.apache.hadoop.mapred.TextInputFormat
                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -334,20 +292,20 @@
       Map Operator Tree:
           TableScan
             alias: p2
-            Statistics: Num rows: 5 Data size: 3173 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
              predicate: (p_name is not null and (p_partkey = 1)) (type: boolean)
-              Statistics: Num rows: 1 Data size: 634 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 6 Data size: 726 Basic stats: COMPLETE Column stats: NONE
              Reduce Output Operator
                sort order: 
-                Statistics: Num rows: 1 Data size: 634 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 6 Data size: 726 Basic stats: COMPLETE Column stats: NONE
                value expressions: p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string)
           TableScan
             alias: p1
-            Statistics: Num rows: 5 Data size: 3173 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
            Reduce Output Operator
              sort order: 
-              Statistics: Num rows: 5 Data size: 3173 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
              value expressions: p_partkey (type: int), p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string)
       Reduce Operator Tree:
         Join Operator
@@ -357,7 +315,7 @@
             0 {VALUE._col0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} {VALUE._col8}
             1 {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} {VALUE._col8}
           outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20
-          Statistics: Num rows: 5 Data size: 3490 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 28 Data size: 3461 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
             table:
@@ -370,22 +328,22 @@
       Map Operator Tree:
          TableScan
            alias: p3
-            Statistics: Num rows: 5 Data size: 3173 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
            Filter Operator
              predicate: p_name is not null (type: boolean)
-              Statistics: Num rows: 3 Data size: 1903 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
              Reduce Output Operator
                key expressions: p_name (type: string)
                sort order: +
                Map-reduce partition columns: p_name (type: string)
-                Statistics: Num rows: 3 Data size: 1903 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
                value expressions: p_partkey (type: int), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string)
          TableScan
            Reduce Output Operator
              key expressions: _col13 (type: string)
              sort order: +
              Map-reduce partition columns: _col13 (type: string)
-              Statistics: Num rows: 5 Data size: 3490 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 28 Data size: 3461 Basic stats: COMPLETE Column stats: NONE
              value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col12 (type: int), _col14 (type: string), _col15 (type: string), _col16 (type: string), _col17 (type: int), _col18 (type: string), _col19 (type: double), _col20 (type: string)
       Reduce Operator Tree:
         Join Operator
@@ -395,17 +353,17 @@
             0 {VALUE._col0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} {VALUE._col8} {KEY.reducesinkkey0} {VALUE._col13} {VALUE._col14} {VALUE._col15} {VALUE._col16} {VALUE._col17} {VALUE._col18} {VALUE._col19}
             1 {VALUE._col0} {KEY.reducesinkkey0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7}
           outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32
-          Statistics: Num rows: 5 Data size: 3839 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 30 Data size: 3807 Basic stats: COMPLETE Column stats: NONE
          Filter Operator
            predicate: (_col25 = _col13) (type: boolean)
-            Statistics: Num rows: 2 Data size: 1535 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 15 Data size: 1903 Basic stats: COMPLETE Column stats: NONE
            Select Operator
              expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), 1 (type: int), _col13 (type: string), _col14 (type: string), _col15 (type: string), _col16 (type: string), _col17 (type: int), _col18 (type: string), _col19 (type: double), _col20 (type: string), _col24 (type: int), _col25 (type: string), _col26 (type: string), _col27 (type: string), _col28 (type: string), _col29 (type: int), _col30 (type: string), _col31 (type: double), _col32 (type: string)
              outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26
-              Statistics: Num rows: 2 Data size: 1535 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 15 Data size: 1903 Basic stats: COMPLETE Column stats: NONE
              File Output Operator
                compressed: false
-                Statistics: Num rows: 2 Data size: 1535 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 15 Data size: 1903 Basic stats: COMPLETE Column stats: NONE
                table:
                    input format: org.apache.hadoop.mapred.TextInputFormat
                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
Index: ql/src/test/results/clientpositive/join_cond_pushdown_4.q.out
===================================================================
--- ql/src/test/results/clientpositive/join_cond_pushdown_4.q.out (revision 1637277)
+++ ql/src/test/results/clientpositive/join_cond_pushdown_4.q.out (working copy)
@@ -1,45 +1,3 @@
-PREHOOK: query: DROP TABLE part
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE part
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: -- data setup
-CREATE TABLE part(
-    p_partkey INT,
-    p_name STRING,
-    p_mfgr STRING,
-    p_brand STRING,
-    p_type STRING,
-    p_size INT,
-    p_container STRING,
-    p_retailprice DOUBLE,
-    p_comment STRING
-)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@part
-POSTHOOK: query: -- data setup
-CREATE TABLE part(
-    p_partkey INT,
-    p_name STRING,
-    p_mfgr STRING,
-    p_brand STRING,
-    p_type STRING,
-    p_size INT,
-    p_container STRING,
-    p_retailprice DOUBLE,
-    p_comment STRING
-)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@part
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@part
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@part
 PREHOOK: query: explain select *
 from part p1 join part p2 join part p3 on p1.p_name = p2.p_name join part p4
 where p2.p_name = p3.p_name and p1.p_name = p4.p_name
@@ -58,51 +16,51 @@
       Map Operator Tree:
           TableScan
             alias: p4
-            Statistics: Num rows: 5 Data size: 3173 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
               predicate: p_name is not null (type: boolean)
-              Statistics: Num rows: 3 Data size: 1903 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
              Reduce Output Operator
                key expressions: p_name (type: string)
                sort order: +
                Map-reduce partition columns: p_name (type: string)
-                Statistics: Num rows: 3 Data size: 1903 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
                value expressions: p_partkey (type: int), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string)
           TableScan
             alias: p3
-            Statistics: Num rows: 5 Data size: 3173 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
               predicate: p_name is not null (type: boolean)
-              Statistics: Num rows: 3 Data size: 1903 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
              Reduce Output Operator
                key expressions: p_name (type: string)
                sort order: +
                Map-reduce partition columns: p_name (type: string)
-                Statistics: Num rows: 3 Data size: 1903 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
                value expressions: p_partkey (type: int), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string)
           TableScan
             alias: p2
-            Statistics: Num rows: 5 Data size: 3173 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
               predicate: p_name is not null (type: boolean)
-              Statistics: Num rows: 3 Data size: 1903 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
              Reduce Output Operator
                key expressions: p_name (type: string)
                sort order: +
                Map-reduce partition columns: p_name (type: string)
-                Statistics: Num rows: 3 Data size: 1903 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
                value expressions: p_partkey (type: int), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string)
           TableScan
             alias: p1
-            Statistics: Num rows: 5 Data size: 3173 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
               predicate: p_name is not null (type: boolean)
-              Statistics: Num rows: 3 Data size: 1903 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
              Reduce Output Operator
                key expressions: p_name (type: string)
                sort order: +
                Map-reduce partition columns: p_name (type: string)
-                Statistics: Num rows: 3 Data size: 1903 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
                value expressions: p_partkey (type: int), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string)
       Reduce Operator Tree:
         Join Operator
@@ -116,17 +74,17 @@
             2 {VALUE._col0} {KEY.reducesinkkey0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7}
             3 {VALUE._col0} {KEY.reducesinkkey0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7}
           outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32, _col36, _col37, _col38, _col39, _col40, _col41, _col42, _col43, _col44
-          Statistics: Num rows: 9 Data size: 6279 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 42 Data size: 5190 Basic stats: COMPLETE Column stats: NONE
          Filter Operator
            predicate: ((_col13 = _col25) and (_col1 = _col37)) (type: boolean)
-            Statistics: Num rows: 2 Data size: 1395 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 10 Data size: 1235 Basic stats: COMPLETE Column stats: NONE
            Select Operator
              expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col12 (type: int), _col13 (type: string), _col14 (type: string), _col15 (type: string), _col16 (type: string), _col17 (type: int), _col18 (type: string), _col19 (type: double), _col20 (type: string), _col24 (type: int), _col25 (type: string), _col26 (type: string), _col27 (type: string), _col28 (type: string), _col29 (type: int), _col30 (type: string), _col31 (type: double), _col32 (type: string), _col36 (type: int), _col37 (type: string), _col38 (type: string), _col39 (type: string), _col40 (type: string), _col41 (type: int), _col42 (type: string), _col43 (type: double), _col44 (type: string)
              outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32, _col33, _col34, _col35
-              Statistics: Num rows: 2 Data size: 1395 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 10 Data size: 1235 Basic stats: COMPLETE Column stats: NONE
              File Output Operator
                compressed: false
-                Statistics: Num rows: 2 Data size: 1395 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 10 Data size: 1235 Basic stats: COMPLETE Column stats: NONE
                table:
                    input format: org.apache.hadoop.mapred.TextInputFormat
                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -160,27 +118,27 @@
       Map Operator Tree:
           TableScan
             alias: p2
-            Statistics: Num rows: 5 Data size: 3173 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
               predicate: (p_name is not null and p_partkey is not null) (type: boolean)
-              Statistics: Num rows: 2 Data size: 1269 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
              Reduce Output Operator
                key expressions: p_name (type: string), p_partkey (type: int)
                sort order: ++
                Map-reduce partition columns: p_name (type: string), p_partkey (type: int)
-                Statistics: Num rows: 2 Data size: 1269 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
                value expressions: p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string)
           TableScan
             alias: p1
-            Statistics: Num rows: 5 Data size: 3173 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
               predicate: (p_name is not null and p_partkey is not null) (type: boolean)
-              Statistics: Num rows: 2 Data size: 1269 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
              Reduce Output Operator
                key expressions: p_name (type: string), p_partkey (type: int)
                sort order: ++
                Map-reduce partition columns: p_name (type: string), p_partkey (type: int)
-                Statistics: Num rows: 2 Data size: 1269 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
                value expressions: p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string)
       Reduce Operator Tree:
         Join Operator
@@ -190,7 +148,7 @@
             0 {KEY.reducesinkkey1} {KEY.reducesinkkey0} {VALUE._col0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6}
             1 {KEY.reducesinkkey1} {KEY.reducesinkkey0} {VALUE._col0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6}
           outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20
-          Statistics: Num rows: 2 Data size: 1395 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 7 Data size: 931 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
             table:
@@ -203,22 +161,22 @@
       Map Operator Tree:
          TableScan
            alias: p3
-            Statistics: Num rows: 5 Data size: 3173 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
            Filter Operator
              predicate: p_name is not null (type: boolean)
-              Statistics: Num rows: 3 Data size: 1903 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
              Reduce Output Operator
                key expressions: p_name (type: string)
                sort order: +
                Map-reduce partition columns: p_name (type: string)
-                Statistics: Num rows: 3 Data size: 1903 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
                value expressions: p_partkey (type: int), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string)
          TableScan
            Reduce Output Operator
              key expressions: _col13 (type: string)
              sort order: +
              Map-reduce partition columns: _col13 (type: string)
-              Statistics: Num rows: 2 Data size: 1395 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 7 Data size: 931 Basic stats: COMPLETE Column stats: NONE
              value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col12 (type: int), _col14 (type: string), _col15 (type: string), _col16 (type: string), _col17 (type: int), _col18 (type: string), _col19 (type: double), _col20 (type: string)
       Reduce Operator Tree:
         Join Operator
@@ -228,7 +186,7 @@
             0 {VALUE._col0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} {VALUE._col8} {VALUE._col12} {KEY.reducesinkkey0} {VALUE._col13} {VALUE._col14} {VALUE._col15} {VALUE._col16} {VALUE._col17} {VALUE._col18} {VALUE._col19}
             1 {VALUE._col0} {KEY.reducesinkkey0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7}
           outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32
-          Statistics: Num rows: 3 Data size: 2093 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
             table:
@@ -241,22 +199,22 @@
       Map Operator Tree:
          TableScan
            alias: p4
-            Statistics: Num rows: 5 Data size: 3173 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
            Filter Operator
              predicate: p_partkey is not null (type: boolean)
-              Statistics: Num rows: 3 Data size: 1903 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
              Reduce Output Operator
                key expressions: p_partkey (type: int)
                sort order: +
                Map-reduce partition columns: p_partkey (type: int)
-                Statistics: Num rows: 3 Data size: 1903 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
                value expressions: p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string)
          TableScan
            Reduce Output Operator
              key expressions: _col0 (type: int)
              sort order: +
              Map-reduce partition columns: _col0 (type: int)
-              Statistics: Num rows: 3 Data size: 2093 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE
              value expressions: _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col12 (type: int), _col13 (type: string), _col14 (type: string), _col15 (type: string), _col16 (type: string), _col17 (type: int), _col18 (type: string), _col19 (type: double), _col20 (type: string), _col24 (type: int), _col25 (type: string), _col26 (type: string), _col27 (type: string), _col28 (type: string), _col29 (type: int), _col30 (type: string), _col31 (type: double), _col32 (type: string)
       Reduce Operator Tree:
         Join Operator
@@ -266,17 +224,17 @@
             0 {KEY.reducesinkkey0} {VALUE._col0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} {VALUE._col11} {VALUE._col12} {VALUE._col13} {VALUE._col14} {VALUE._col15} {VALUE._col16} {VALUE._col17} {VALUE._col18} {VALUE._col19} {VALUE._col23} {VALUE._col24} {VALUE._col25} {VALUE._col26} {VALUE._col27} {VALUE._col28} {VALUE._col29} {VALUE._col30} {VALUE._col31}
             1 {KEY.reducesinkkey0} {VALUE._col0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7}
           outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32, _col36, _col37, _col38, _col39, _col40, _col41, _col42, _col43, _col44
-          Statistics: Num rows: 3 Data size: 2302 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 15 Data size: 1903 Basic stats: COMPLETE Column stats: NONE
          Filter Operator
            predicate: (((_col13 = _col25) and (_col0 = _col36)) and (_col0 = _col12)) (type: boolean)
-            Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+            Statistics: Num rows: 1 Data size: 126 Basic stats: COMPLETE Column stats: NONE
            Select Operator
              expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col12 (type: int), _col13 (type: string), _col14 (type: string), _col15 (type: string), _col16 (type: string), _col17 (type: int), _col18 (type: string), _col19 (type: double), _col20 (type: string), _col24 (type: int), _col25 (type: string), _col26 (type: string), _col27 (type: string), _col28 (type: string), _col29 (type: int), _col30 (type: string), _col31 (type: double), _col32 (type: string), _col36 (type: int), _col37 (type: string), _col38 (type: string), _col39 (type: string), _col40 (type: string), _col41 (type: int), _col42 (type: string), _col43 (type: double), _col44 (type: string)
              outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32, _col33, _col34, _col35
-              Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+              Statistics: Num rows: 1 Data size: 126 Basic stats: COMPLETE Column stats: NONE
              File Output Operator
                compressed: false
-                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                Statistics: Num rows: 1 Data size: 126 Basic stats: COMPLETE Column stats: NONE
                table:
                    input format: org.apache.hadoop.mapred.TextInputFormat
                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
Index: ql/src/test/results/clientpositive/join_cond_pushdown_unqual1.q.out
===================================================================
--- ql/src/test/results/clientpositive/join_cond_pushdown_unqual1.q.out (revision 1637277)
+++ ql/src/test/results/clientpositive/join_cond_pushdown_unqual1.q.out (working copy)
@@ -1,45 +1,3 @@
-PREHOOK: query: DROP TABLE part
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE part
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: -- data setup
-CREATE TABLE part(
-    p_partkey INT,
-    p_name STRING,
-    p_mfgr STRING,
-    p_brand STRING,
-    p_type STRING,
-    p_size INT,
-    p_container STRING,
-    p_retailprice DOUBLE,
-    p_comment STRING
-)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@part
-POSTHOOK: query: -- data setup
-CREATE TABLE part(
-    p_partkey INT,
-    p_name STRING,
-    p_mfgr STRING,
-    p_brand STRING,
-    p_type STRING,
-    p_size INT,
-    p_container STRING,
-    p_retailprice DOUBLE,
-    p_comment STRING
-)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@part
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@part
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@part
 PREHOOK: query: create table part2(
     p2_partkey INT,
     p2_name STRING,
@@ -136,15 +94,15 @@
                value expressions: p2_partkey (type: int), p2_mfgr (type: string), p2_brand (type: string), p2_type (type: string), p2_size (type: int), p2_container (type: string), p2_retailprice (type: double), p2_comment (type: string)
           TableScan
             alias: p1
-            Statistics: Num rows: 5 Data size: 3173 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
            Filter Operator
              predicate: p_name is not null (type: boolean)
-              Statistics: Num rows: 3 Data size: 1903 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
              Reduce Output Operator
                key expressions: p_name (type: string)
                sort order: +
                Map-reduce partition columns: p_name (type: string)
-                Statistics: Num rows: 3 Data size: 1903 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
                value expressions: p_partkey (type: int), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string)
       Reduce Operator Tree:
         Join Operator
@@ -156,14 +114,14 @@
             1 {VALUE._col0} {KEY.reducesinkkey0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7}
             2 {VALUE._col0} {KEY.reducesinkkey0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7}
           outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32
-          Statistics: Num rows: 6 Data size: 4186 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 28 Data size: 3460 Basic stats: COMPLETE Column stats: NONE
          Select Operator
            expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col12 (type: int), _col13 (type: string), _col14 (type: string), _col15 (type: string), _col16 (type: string), _col17 (type: int), _col18 (type: string), _col19 (type: double), _col20 (type: string), _col24 (type: int), _col25 (type: string), _col26 (type: string), _col27 (type: string), _col28 (type: string), _col29 (type: int), _col30 (type: string), _col31 (type: double), _col32 (type: string)
            outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26
-            Statistics: Num rows: 6 Data size: 4186 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 28 Data size: 3460 Basic stats: COMPLETE Column stats: NONE
            File Output Operator
              compressed: false
-              Statistics: Num rows: 6 Data size: 4186 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 28 Data size: 3460 Basic stats: COMPLETE Column stats: NONE
              table:
                  input format: org.apache.hadoop.mapred.TextInputFormat
                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -215,15 +173,15 @@
                value expressions: p2_partkey (type: int), p2_mfgr (type: string), p2_brand (type: string), p2_type (type: string), p2_size (type: int), p2_container (type: string), p2_retailprice (type: double), p2_comment (type: string)
           TableScan
             alias: p1
-            Statistics: Num rows: 5 Data size: 3173 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
            Filter Operator
              predicate: p_name is not null (type: boolean)
-              Statistics: Num rows: 3 Data size: 1903 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
              Reduce Output Operator
                key expressions: p_name (type: string)
                sort order: +
                Map-reduce partition columns: p_name (type: string)
-                Statistics: Num rows: 3 Data size: 1903 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
                value expressions: p_partkey (type: int), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string)
       Reduce Operator Tree:
         Join Operator
@@ -235,14 +193,14 @@
             1 {VALUE._col0} {KEY.reducesinkkey0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7}
             2 {VALUE._col0} {KEY.reducesinkkey0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7}
           outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32
-          Statistics: Num rows: 6 Data size: 4186 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 28 Data size: 3460 Basic stats: COMPLETE Column stats: NONE
          Select Operator
            expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col12 (type: int), _col13 (type: string), _col14 (type: string), _col15 (type: string), _col16 (type: string), _col17 (type: int), _col18 (type: string), _col19 (type: double), _col20 (type: string), _col24 (type: int), _col25 (type: string), _col26 (type: string), _col27 (type: string), _col28 (type: string), _col29 (type: int), _col30 (type: string), _col31 (type: double), _col32 (type: string)
            outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26
-            Statistics: Num rows: 6 Data size: 4186 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 28 Data size: 3460 Basic stats: COMPLETE Column stats: NONE
            File Output Operator
              compressed: false
-              Statistics: Num rows: 6 Data size: 4186 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 28 Data size: 3460 Basic stats: COMPLETE Column stats: NONE
              table:
                  input format: org.apache.hadoop.mapred.TextInputFormat
                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -282,10 +240,10 @@
                value expressions: p2_partkey (type: int), p2_name (type: string), p2_mfgr (type: string), p2_brand (type: string), p2_type (type: string), p2_size (type: int), p2_container (type: string), p2_retailprice (type: double), p2_comment (type: string)
           TableScan
             alias: p1
-            Statistics: Num rows: 5 Data size: 3173 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
            Reduce Output Operator
              sort order: 
-              Statistics: Num rows: 5 Data size: 3173 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
              value expressions: p_partkey (type: int), p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string)
       Reduce Operator Tree:
         Join Operator
@@ -295,10 +253,10 @@
             0 {VALUE._col0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} {VALUE._col8}
             1 {VALUE._col0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} {VALUE._col8}
           outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20
-          Statistics: Num rows: 5 Data size: 3490 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 28 Data size: 3461 Basic stats: COMPLETE Column stats: NONE
          Filter Operator
            predicate: (((_col12 + _col0) = _col0) and _col13 is not null) (type: boolean)
-            Statistics: Num rows: 1 Data size: 698 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 7 Data size: 865 Basic stats: COMPLETE Column stats: NONE
            File Output Operator
              compressed: false
              table:
@@ -326,7 +284,7 @@
              key expressions: _col13 (type: string)
              sort order: +
              Map-reduce partition columns: _col13 (type: string)
-              Statistics: Num rows: 1 Data size: 698 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 7 Data size: 865 Basic stats: COMPLETE Column stats: NONE
              value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col12 (type: int), _col14 (type: string), _col15 (type: string), _col16 (type: string), _col17 (type: int), _col18 (type: string), _col19 (type: double), _col20 (type: string)
       Reduce Operator Tree:
         Join Operator
@@ -336,14 +294,14 @@
             0 {VALUE._col0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} {VALUE._col8} {VALUE._col12} {KEY.reducesinkkey0} {VALUE._col13} {VALUE._col14} {VALUE._col15} {VALUE._col16} {VALUE._col17} {VALUE._col18} {VALUE._col19}
             1 {VALUE._col0} {KEY.reducesinkkey0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7}
           outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32
-          Statistics: Num rows: 1 Data size: 767 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 7 Data size: 951 Basic stats: COMPLETE Column stats: NONE
          Select Operator
            expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col12 (type: int), _col13 (type: string), _col14 (type: string), _col15 (type: string), _col16 (type: string), _col17 (type: int), _col18 (type: string), _col19 (type: double), _col20 (type: string), _col24 (type: int), _col25 (type: string), _col26 (type: string), _col27 (type: string), _col28 (type: string), _col29 (type: int), _col30 (type: string), _col31 (type: double), _col32 (type: string)
            outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26
-            Statistics: Num rows: 1 Data size: 767 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 7 Data size: 951 Basic stats: COMPLETE Column stats: NONE
            File Output Operator
              compressed: false
-              Statistics: Num rows: 1 Data size: 767 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 7 Data size: 951 Basic stats: COMPLETE Column stats: NONE
              table:
                  input format: org.apache.hadoop.mapred.TextInputFormat
                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -383,10 +341,10 @@
                value expressions: p2_name (type: string), p2_mfgr (type: string), p2_brand (type: string), p2_type (type: string), p2_size (type: int), p2_container (type: string), p2_retailprice (type: double), p2_comment (type: string)
           TableScan
             alias: p1
-            Statistics: Num rows: 5 Data size: 3173 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
            Reduce Output Operator
              sort order: 
-              Statistics: Num rows: 5 Data size: 3173 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
              value expressions: p_partkey (type: int), p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string)
       Reduce Operator Tree:
         Join Operator
@@ -396,7 +354,7 @@
             0 {VALUE._col0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} {VALUE._col8}
             1 {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} {VALUE._col8}
           outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20
-          Statistics: Num rows: 5 Data size: 3490 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 28 Data size: 3461 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
             table:
@@ -424,7 +382,7 @@
              key expressions: _col13 (type: string)
              sort order: +
              Map-reduce partition columns: _col13 (type: string)
-              Statistics: Num rows: 5 Data size: 3490 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 28 Data size: 3461 Basic stats: COMPLETE Column stats: NONE
              value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col14 (type: string), _col15 (type: string), _col16 (type: string), _col17 (type: int), _col18 (type: string), _col19 (type: double), _col20 (type: string)
       Reduce Operator Tree:
         Join Operator
@@ -434,14 +392,14 @@
             0 {VALUE._col0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} {VALUE._col8} {KEY.reducesinkkey0} {VALUE._col13} {VALUE._col14} {VALUE._col15} {VALUE._col16} {VALUE._col17} {VALUE._col18} {VALUE._col19}
             1 {VALUE._col0} {KEY.reducesinkkey0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7}
           outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32
-          Statistics: Num rows: 5 Data size: 3839 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 30 Data size: 3807 Basic stats: COMPLETE Column stats: NONE
          Select Operator
            expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), 1 (type: int), _col13 (type: string), _col14 (type: string), _col15 (type: string), _col16 (type: string), _col17 (type: int), _col18 (type: string), _col19 (type: double), _col20 (type: string), _col24 (type: int), _col25 (type: string), _col26 (type: string), _col27 (type: string), _col28 (type: string), _col29 (type: int), _col30 (type: string), _col31 (type: double), _col32 (type: string)
            outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26
-            Statistics: Num rows: 5 Data size: 3839 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 30 Data size: 3807 Basic stats: COMPLETE Column stats: NONE
            File Output Operator
              compressed: false
-              Statistics: Num rows: 5 Data size: 3839 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 30 Data size: 3807 Basic stats: COMPLETE Column stats: NONE
              table:
                  input format: org.apache.hadoop.mapred.TextInputFormat
                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
Index: ql/src/test/results/clientpositive/join_cond_pushdown_unqual2.q.out
===================================================================
--- ql/src/test/results/clientpositive/join_cond_pushdown_unqual2.q.out (revision 1637277)
+++ ql/src/test/results/clientpositive/join_cond_pushdown_unqual2.q.out (working copy)
@@ -1,45 +1,3 @@
-PREHOOK: query: DROP TABLE part
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE part
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: -- data setup
-CREATE TABLE part(
-    p_partkey INT,
-    p_name STRING,
-    p_mfgr STRING,
-    p_brand STRING,
-    p_type STRING,
-    p_size INT,
-    p_container STRING,
-    p_retailprice DOUBLE,
-    p_comment STRING
-)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@part
-POSTHOOK: query: -- data setup
-CREATE TABLE part(
-    p_partkey INT,
-    p_name STRING,
-    p_mfgr STRING,
-    p_brand STRING,
-    p_type STRING,
-    p_size INT,
-    p_container STRING,
-    p_retailprice DOUBLE,
-    p_comment STRING
-)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@part
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@part
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@part
 PREHOOK: query: create table part2(
     p2_partkey INT,
     p2_name STRING,
@@ -112,27 +70,27 @@
       Map Operator Tree:
           TableScan
             alias: p4
-            Statistics: Num rows: 5 Data size: 3173 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
            Filter Operator
              predicate: p_name is not null (type: boolean)
-              Statistics: Num rows: 3 Data size: 1903 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
              Reduce Output Operator
                key expressions: p_name (type: string)
                sort order: +
                Map-reduce partition columns: p_name (type: string)
-                Statistics: Num rows: 3 Data size: 1903 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
                value expressions: p_partkey (type: int), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string)
           TableScan
             alias: p1
-            Statistics: Num rows: 5 Data size: 3173 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
            Filter Operator
              predicate: p_name is not null (type: boolean)
-              Statistics: Num rows: 3 Data size: 1903 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
              Reduce Output Operator
                key expressions: p_name (type: string)
                sort order: +
                Map-reduce partition columns: p_name (type: string)
-                Statistics: Num rows: 3 Data size: 1903 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
                value expressions: p_partkey (type: int), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string)
           TableScan
             alias: p3
@@ -170,14 +128,14 @@
             2 {VALUE._col0} {KEY.reducesinkkey0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7}
             3 {VALUE._col0} {KEY.reducesinkkey0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7}
           outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32, _col36, _col37, _col38, _col39, _col40, _col41, _col42, _col43, _col44
-          Statistics: Num rows: 9 Data size: 6279 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 42 Data size: 5190 Basic stats: COMPLETE Column stats: NONE
          Select Operator
            expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col12 (type: int), _col13 (type: string), _col14 (type: string), _col15 (type: string), _col16 (type: string), _col17 (type: int), _col18 (type: string), _col19 (type: double), _col20 (type: string), _col24 (type: int), _col25 (type: string), _col26 (type: string), _col27 (type: string), _col28 (type: string), _col29 (type: int), _col30 (type: string), _col31 (type: double), _col32 (type: string), _col36 (type: int), _col37 (type: string), _col38 (type: string), _col39 (type: string), _col40 (type: string), _col41 (type: int), _col42 (type: string), _col43 (type: double), _col44 (type: string)
            outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32, _col33, _col34, _col35
-            Statistics: Num rows: 9 Data size: 6279 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 42 Data size: 5190 Basic stats: COMPLETE Column stats: NONE
            File Output Operator
              compressed: false
-              Statistics: Num rows: 9 Data size: 6279 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 42 Data size: 5190 Basic stats: COMPLETE Column stats: NONE
              table:
                  input format: org.apache.hadoop.mapred.TextInputFormat
                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -221,15 +179,15 @@
                value expressions: p2_mfgr (type: string), p2_brand (type: string), p2_type (type: string), p2_size (type: int), p2_container (type: string), p2_retailprice (type: double), p2_comment (type: string)
           TableScan
             alias: p1
-            Statistics: Num rows: 5 Data size: 3173 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
            Filter Operator
              predicate: (p_name is not null and p_partkey is not null) (type: boolean)
-              Statistics: Num rows: 2 Data size: 1269 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
              Reduce Output Operator
                key expressions: p_name (type: string), p_partkey (type: int)
                sort order: ++
                Map-reduce partition columns: p_name (type: string), p_partkey (type: int)
-                Statistics: Num rows: 2 Data size: 1269 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
                value expressions: p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string)
       Reduce Operator Tree:
         Join Operator
@@ -239,7 +197,7 @@
             0 {KEY.reducesinkkey1} {KEY.reducesinkkey0} {VALUE._col0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6}
             1 {KEY.reducesinkkey1} {KEY.reducesinkkey0} {VALUE._col0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6}
           outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20
-          Statistics: Num rows: 2 Data size: 1395 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 7 Data size: 931 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
             table:
@@ -267,7 +225,7 @@
              key expressions: _col13 (type: string)
              sort order: +
              Map-reduce partition columns: _col13 (type: string)
-              Statistics: Num rows: 2 Data size: 1395 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 7 Data size: 931 Basic stats: COMPLETE Column stats: NONE
              value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col12 (type: int), _col14 (type: string), _col15 (type: string), _col16 (type: string), _col17 (type: int), _col18 (type: string), _col19 (type: double), _col20 (type: string)
       Reduce Operator Tree:
         Join Operator
@@ -277,7 +235,7 @@
             0 {VALUE._col0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5}
{VALUE._col6} {VALUE._col7} {VALUE._col8} {VALUE._col12} {KEY.reducesinkkey0} {VALUE._col13} {VALUE._col14} {VALUE._col15} {VALUE._col16} {VALUE._col17} {VALUE._col18} {VALUE._col19} 1 {VALUE._col0} {KEY.reducesinkkey0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32 - Statistics: Num rows: 2 Data size: 1534 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 7 Data size: 1024 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false table: @@ -290,22 +248,22 @@ Map Operator Tree: TableScan alias: p4 - Statistics: Num rows: 5 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: p_partkey is not null (type: boolean) - Statistics: Num rows: 3 Data size: 1903 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: p_partkey (type: int) sort order: + Map-reduce partition columns: p_partkey (type: int) - Statistics: Num rows: 3 Data size: 1903 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE value expressions: p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string) TableScan Reduce Output Operator key expressions: _col0 (type: int) sort order: + Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 2 Data size: 1534 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 7 Data size: 1024 Basic stats: COMPLETE Column stats: NONE value expressions: _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col12 (type: int), _col13 (type: string), _col14 (type: string), _col15 (type: string), _col16 (type: string), _col17 (type: int), _col18 (type: string), _col19 (type: double), _col20 (type: string), _col24 (type: int), _col25 (type: string), _col26 (type: string), _col27 (type: string), _col28 (type: string), _col29 (type: int), _col30 (type: string), _col31 (type: double), _col32 (type: string) Reduce Operator Tree: Join Operator @@ -315,14 +273,14 @@ 0 {KEY.reducesinkkey0} {VALUE._col0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} {VALUE._col11} {VALUE._col12} {VALUE._col13} {VALUE._col14} {VALUE._col15} {VALUE._col16} {VALUE._col17} {VALUE._col18} {VALUE._col19} {VALUE._col23} {VALUE._col24} {VALUE._col25} {VALUE._col26} {VALUE._col27} {VALUE._col28} {VALUE._col29} {VALUE._col30} {VALUE._col31} 1 {KEY.reducesinkkey0} {VALUE._col0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32, _col36, _col37, _col38, _col39, _col40, _col41, _col42, _col43, _col44 - Statistics: Num rows: 3 Data size: 2093 
Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col12 (type: int), _col13 (type: string), _col14 (type: string), _col15 (type: string), _col16 (type: string), _col17 (type: int), _col18 (type: string), _col19 (type: double), _col20 (type: string), _col24 (type: int), _col25 (type: string), _col26 (type: string), _col27 (type: string), _col28 (type: string), _col29 (type: int), _col30 (type: string), _col31 (type: double), _col32 (type: string), _col36 (type: int), _col37 (type: string), _col38 (type: string), _col39 (type: string), _col40 (type: string), _col41 (type: int), _col42 (type: string), _col43 (type: double), _col44 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32, _col33, _col34, _col35 - Statistics: Num rows: 3 Data size: 2093 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 3 Data size: 2093 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat Index: ql/src/test/results/clientpositive/join_cond_pushdown_unqual3.q.out =================================================================== --- ql/src/test/results/clientpositive/join_cond_pushdown_unqual3.q.out (revision 1637277) +++ ql/src/test/results/clientpositive/join_cond_pushdown_unqual3.q.out (working copy) @@ -1,45 +1,3 @@ -PREHOOK: query: DROP TABLE part -PREHOOK: type: DROPTABLE -POSTHOOK: query: DROP TABLE part -POSTHOOK: type: DROPTABLE -PREHOOK: query: -- data setup -CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@part -POSTHOOK: query: -- data setup -CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@part -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part -PREHOOK: type: LOAD -#### A masked pattern was here #### -PREHOOK: Output: default@part -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part -POSTHOOK: type: LOAD -#### A masked pattern was here #### -POSTHOOK: Output: default@part PREHOOK: query: create table part2( p2_partkey INT, p2_name STRING, @@ -138,15 +96,15 @@ value expressions: p2_partkey (type: int), p2_mfgr (type: string), p2_brand (type: string), p2_type (type: string), p2_size (type: int), p2_container (type: string), p2_retailprice (type: double), p2_comment (type: string) TableScan alias: p1 - 
Statistics: Num rows: 5 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: p_name is not null (type: boolean) - Statistics: Num rows: 3 Data size: 1903 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: p_name (type: string) sort order: + Map-reduce partition columns: p_name (type: string) - Statistics: Num rows: 3 Data size: 1903 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE value expressions: p_partkey (type: int), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string) Reduce Operator Tree: Join Operator @@ -158,17 +116,17 @@ 1 {VALUE._col0} {KEY.reducesinkkey0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} 2 {VALUE._col0} {KEY.reducesinkkey0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32 - Statistics: Num rows: 6 Data size: 4186 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 28 Data size: 3460 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: ((_col1 = _col13) and (_col13 = _col25)) (type: boolean) - Statistics: Num rows: 1 Data size: 697 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 7 Data size: 865 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col12 (type: int), _col13 (type: string), _col14 (type: string), _col15 (type: string), _col16 (type: string), _col17 (type: int), _col18 (type: string), _col19 (type: double), _col20 (type: string), _col24 (type: int), _col25 (type: string), _col26 (type: string), _col27 (type: string), _col28 (type: string), _col29 (type: int), _col30 (type: string), _col31 (type: double), _col32 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26 - Statistics: Num rows: 1 Data size: 697 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 7 Data size: 865 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 1 Data size: 697 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 7 Data size: 865 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -222,15 +180,15 @@ value expressions: p2_partkey (type: int), p2_mfgr (type: string), p2_brand (type: string), p2_type (type: string), p2_size (type: int), p2_container (type: string), p2_retailprice (type: double), p2_comment (type: string) TableScan alias: p1 - Statistics: Num rows: 5 Data size: 3173 Basic stats: COMPLETE 
Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: p_name is not null (type: boolean) - Statistics: Num rows: 3 Data size: 1903 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: p_name (type: string) sort order: + Map-reduce partition columns: p_name (type: string) - Statistics: Num rows: 3 Data size: 1903 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE value expressions: p_partkey (type: int), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string) Reduce Operator Tree: Join Operator @@ -242,17 +200,17 @@ 1 {VALUE._col0} {KEY.reducesinkkey0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} 2 {VALUE._col0} {KEY.reducesinkkey0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32 - Statistics: Num rows: 6 Data size: 4186 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 28 Data size: 3460 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: ((_col13 = _col1) and (_col25 = _col13)) (type: boolean) - Statistics: Num rows: 1 Data size: 697 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 7 Data size: 865 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col12 (type: int), _col13 (type: string), _col14 (type: string), _col15 (type: string), _col16 (type: string), _col17 (type: int), _col18 (type: string), _col19 (type: double), _col20 (type: string), _col24 (type: int), _col25 (type: string), _col26 (type: string), _col27 (type: string), _col28 (type: string), _col29 (type: int), _col30 (type: string), _col31 (type: double), _col32 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26 - Statistics: Num rows: 1 Data size: 697 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 7 Data size: 865 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 1 Data size: 697 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 7 Data size: 865 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -294,10 +252,10 @@ value expressions: p2_partkey (type: int), p2_name (type: string), p2_mfgr (type: string), p2_brand (type: string), p2_type (type: string), p2_size (type: int), p2_container (type: string), p2_retailprice (type: double), p2_comment (type: string) TableScan alias: p1 - Statistics: Num rows: 5 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 
26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator sort order: - Statistics: Num rows: 5 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE value expressions: p_partkey (type: int), p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string) Reduce Operator Tree: Join Operator @@ -307,10 +265,10 @@ 0 {VALUE._col0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} {VALUE._col8} 1 {VALUE._col0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} {VALUE._col8} outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20 - Statistics: Num rows: 5 Data size: 3490 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 28 Data size: 3461 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: (((_col12 + _col0) = _col0) and _col13 is not null) (type: boolean) - Statistics: Num rows: 1 Data size: 698 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 7 Data size: 865 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false table: @@ -338,7 +296,7 @@ key expressions: _col13 (type: string) sort order: + Map-reduce partition columns: _col13 (type: string) - Statistics: Num rows: 1 Data size: 698 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 7 Data size: 865 Basic stats: COMPLETE Column stats: NONE value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col12 (type: int), _col14 (type: string), _col15 (type: string), _col16 (type: string), _col17 (type: int), _col18 (type: string), _col19 (type: double), _col20 (type: string) Reduce Operator Tree: Join Operator @@ -348,17 +306,17 @@ 0 {VALUE._col0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} {VALUE._col8} {VALUE._col12} {KEY.reducesinkkey0} {VALUE._col13} {VALUE._col14} {VALUE._col15} {VALUE._col16} {VALUE._col17} {VALUE._col18} {VALUE._col19} 1 {VALUE._col0} {KEY.reducesinkkey0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32 - Statistics: Num rows: 1 Data size: 767 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 7 Data size: 951 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: (((_col12 + _col0) = _col0) and (_col25 = _col13)) (type: boolean) - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Statistics: Num rows: 1 Data size: 135 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col12 (type: int), _col13 (type: string), _col14 (type: string), _col15 (type: string), _col16 (type: string), 
_col17 (type: int), _col18 (type: string), _col19 (type: double), _col20 (type: string), _col24 (type: int), _col25 (type: string), _col26 (type: string), _col27 (type: string), _col28 (type: string), _col29 (type: int), _col30 (type: string), _col31 (type: double), _col32 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26 - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Statistics: Num rows: 1 Data size: 135 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Statistics: Num rows: 1 Data size: 135 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -400,10 +358,10 @@ value expressions: p2_name (type: string), p2_mfgr (type: string), p2_brand (type: string), p2_type (type: string), p2_size (type: int), p2_container (type: string), p2_retailprice (type: double), p2_comment (type: string) TableScan alias: p1 - Statistics: Num rows: 5 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator sort order: - Statistics: Num rows: 5 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE value expressions: p_partkey (type: int), p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string) Reduce Operator Tree: Join Operator @@ -413,7 +371,7 @@ 0 {VALUE._col0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} {VALUE._col8} 1 {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} {VALUE._col8} outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20 - Statistics: Num rows: 5 Data size: 3490 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 28 Data size: 3461 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false table: @@ -441,7 +399,7 @@ key expressions: _col13 (type: string) sort order: + Map-reduce partition columns: _col13 (type: string) - Statistics: Num rows: 5 Data size: 3490 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 28 Data size: 3461 Basic stats: COMPLETE Column stats: NONE value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col14 (type: string), _col15 (type: string), _col16 (type: string), _col17 (type: int), _col18 (type: string), _col19 (type: double), _col20 (type: string) Reduce Operator Tree: Join Operator @@ -451,17 +409,17 @@ 0 {VALUE._col0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} {VALUE._col8} {KEY.reducesinkkey0} {VALUE._col13} {VALUE._col14} {VALUE._col15} {VALUE._col16} {VALUE._col17} {VALUE._col18} {VALUE._col19} 1 {VALUE._col0} {KEY.reducesinkkey0} {VALUE._col1} 
{VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32 - Statistics: Num rows: 5 Data size: 3839 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 30 Data size: 3807 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: (_col25 = _col13) (type: boolean) - Statistics: Num rows: 2 Data size: 1535 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 15 Data size: 1903 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), 1 (type: int), _col13 (type: string), _col14 (type: string), _col15 (type: string), _col16 (type: string), _col17 (type: int), _col18 (type: string), _col19 (type: double), _col20 (type: string), _col24 (type: int), _col25 (type: string), _col26 (type: string), _col27 (type: string), _col28 (type: string), _col29 (type: int), _col30 (type: string), _col31 (type: double), _col32 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26 - Statistics: Num rows: 2 Data size: 1535 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 15 Data size: 1903 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 2 Data size: 1535 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 15 Data size: 1903 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat Index: ql/src/test/results/clientpositive/join_cond_pushdown_unqual4.q.out =================================================================== --- ql/src/test/results/clientpositive/join_cond_pushdown_unqual4.q.out (revision 1637277) +++ ql/src/test/results/clientpositive/join_cond_pushdown_unqual4.q.out (working copy) @@ -1,45 +1,3 @@ -PREHOOK: query: DROP TABLE part -PREHOOK: type: DROPTABLE -POSTHOOK: query: DROP TABLE part -POSTHOOK: type: DROPTABLE -PREHOOK: query: -- data setup -CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@part -POSTHOOK: query: -- data setup -CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@part -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part -PREHOOK: type: LOAD -#### A masked pattern was here #### -PREHOOK: Output: default@part -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part -POSTHOOK: type: LOAD -#### A masked pattern was here #### -POSTHOOK: Output: default@part PREHOOK: query: create table part2( 
p2_partkey INT, p2_name STRING, @@ -114,27 +72,27 @@ Map Operator Tree: TableScan alias: p4 - Statistics: Num rows: 5 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: p_name is not null (type: boolean) - Statistics: Num rows: 3 Data size: 1903 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: p_name (type: string) sort order: + Map-reduce partition columns: p_name (type: string) - Statistics: Num rows: 3 Data size: 1903 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE value expressions: p_partkey (type: int), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string) TableScan alias: p1 - Statistics: Num rows: 5 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: p_name is not null (type: boolean) - Statistics: Num rows: 3 Data size: 1903 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: p_name (type: string) sort order: + Map-reduce partition columns: p_name (type: string) - Statistics: Num rows: 3 Data size: 1903 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE value expressions: p_partkey (type: int), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string) TableScan alias: p3 @@ -172,17 +130,17 @@ 2 {VALUE._col0} {KEY.reducesinkkey0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} 3 {VALUE._col0} {KEY.reducesinkkey0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32, _col36, _col37, _col38, _col39, _col40, _col41, _col42, _col43, _col44 - Statistics: Num rows: 9 Data size: 6279 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 42 Data size: 5190 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: ((_col13 = _col25) and (_col1 = _col37)) (type: boolean) - Statistics: Num rows: 2 Data size: 1395 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 10 Data size: 1235 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col12 (type: int), _col13 (type: string), _col14 (type: string), _col15 (type: string), _col16 (type: string), _col17 (type: int), _col18 (type: string), _col19 (type: double), _col20 (type: string), _col24 (type: int), _col25 (type: string), _col26 (type: string), _col27 (type: string), _col28 (type: string), _col29 (type: int), _col30 (type: string), _col31 (type: double), 
_col32 (type: string), _col36 (type: int), _col37 (type: string), _col38 (type: string), _col39 (type: string), _col40 (type: string), _col41 (type: int), _col42 (type: string), _col43 (type: double), _col44 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32, _col33, _col34, _col35 - Statistics: Num rows: 2 Data size: 1395 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 10 Data size: 1235 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 2 Data size: 1395 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 10 Data size: 1235 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -228,15 +186,15 @@ value expressions: p2_mfgr (type: string), p2_brand (type: string), p2_type (type: string), p2_size (type: int), p2_container (type: string), p2_retailprice (type: double), p2_comment (type: string) TableScan alias: p1 - Statistics: Num rows: 5 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: (p_name is not null and p_partkey is not null) (type: boolean) - Statistics: Num rows: 2 Data size: 1269 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: p_name (type: string), p_partkey (type: int) sort order: ++ Map-reduce partition columns: p_name (type: string), p_partkey (type: int) - Statistics: Num rows: 2 Data size: 1269 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE value expressions: p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string) Reduce Operator Tree: Join Operator @@ -246,7 +204,7 @@ 0 {KEY.reducesinkkey1} {KEY.reducesinkkey0} {VALUE._col0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} 1 {KEY.reducesinkkey1} {KEY.reducesinkkey0} {VALUE._col0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20 - Statistics: Num rows: 2 Data size: 1395 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 7 Data size: 931 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false table: @@ -274,7 +232,7 @@ key expressions: _col13 (type: string) sort order: + Map-reduce partition columns: _col13 (type: string) - Statistics: Num rows: 2 Data size: 1395 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 7 Data size: 931 Basic stats: COMPLETE Column stats: NONE value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col12 (type: int), _col14 (type: string), _col15 (type: string), _col16 (type: string), _col17 (type: int), _col18 (type: 
string), _col19 (type: double), _col20 (type: string) Reduce Operator Tree: Join Operator @@ -284,7 +242,7 @@ 0 {VALUE._col0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} {VALUE._col8} {VALUE._col12} {KEY.reducesinkkey0} {VALUE._col13} {VALUE._col14} {VALUE._col15} {VALUE._col16} {VALUE._col17} {VALUE._col18} {VALUE._col19} 1 {VALUE._col0} {KEY.reducesinkkey0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32 - Statistics: Num rows: 2 Data size: 1534 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 7 Data size: 1024 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false table: @@ -297,22 +255,22 @@ Map Operator Tree: TableScan alias: p4 - Statistics: Num rows: 5 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: p_partkey is not null (type: boolean) - Statistics: Num rows: 3 Data size: 1903 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: p_partkey (type: int) sort order: + Map-reduce partition columns: p_partkey (type: int) - Statistics: Num rows: 3 Data size: 1903 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE value expressions: p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string) TableScan Reduce Output Operator key expressions: _col0 (type: int) sort order: + Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 2 Data size: 1534 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 7 Data size: 1024 Basic stats: COMPLETE Column stats: NONE value expressions: _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col12 (type: int), _col13 (type: string), _col14 (type: string), _col15 (type: string), _col16 (type: string), _col17 (type: int), _col18 (type: string), _col19 (type: double), _col20 (type: string), _col24 (type: int), _col25 (type: string), _col26 (type: string), _col27 (type: string), _col28 (type: string), _col29 (type: int), _col30 (type: string), _col31 (type: double), _col32 (type: string) Reduce Operator Tree: Join Operator @@ -322,17 +280,17 @@ 0 {KEY.reducesinkkey0} {VALUE._col0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} {VALUE._col11} {VALUE._col12} {VALUE._col13} {VALUE._col14} {VALUE._col15} {VALUE._col16} {VALUE._col17} {VALUE._col18} {VALUE._col19} {VALUE._col23} {VALUE._col24} {VALUE._col25} {VALUE._col26} {VALUE._col27} {VALUE._col28} {VALUE._col29} {VALUE._col30} {VALUE._col31} 1 {KEY.reducesinkkey0} {VALUE._col0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, 
_col20, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32, _col36, _col37, _col38, _col39, _col40, _col41, _col42, _col43, _col44 - Statistics: Num rows: 3 Data size: 2093 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: (((_col13 = _col25) and (_col0 = _col36)) and (_col0 = _col12)) (type: boolean) - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Statistics: Num rows: 1 Data size: 123 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col12 (type: int), _col13 (type: string), _col14 (type: string), _col15 (type: string), _col16 (type: string), _col17 (type: int), _col18 (type: string), _col19 (type: double), _col20 (type: string), _col24 (type: int), _col25 (type: string), _col26 (type: string), _col27 (type: string), _col28 (type: string), _col29 (type: int), _col30 (type: string), _col31 (type: double), _col32 (type: string), _col36 (type: int), _col37 (type: string), _col38 (type: string), _col39 (type: string), _col40 (type: string), _col41 (type: int), _col42 (type: string), _col43 (type: double), _col44 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32, _col33, _col34, _col35 - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Statistics: Num rows: 1 Data size: 123 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Statistics: Num rows: 1 Data size: 123 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat Index: ql/src/test/results/clientpositive/join_merging.q.out =================================================================== --- ql/src/test/results/clientpositive/join_merging.q.out (revision 1637277) +++ ql/src/test/results/clientpositive/join_merging.q.out (working copy) @@ -1,31 +1,3 @@ -PREHOOK: query: CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@part -POSTHOOK: query: CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@part PREHOOK: query: explain select p1.p_size, p2.p_size from part p1 left outer join part p2 on p1.p_partkey = p2.p_partkey right outer join part p3 on p2.p_partkey = p3.p_partkey and @@ -46,32 +18,32 @@ Map Operator Tree: TableScan alias: p3 - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: p_partkey (type: int) sort 
order: + Map-reduce partition columns: p_partkey (type: int) - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE TableScan alias: p2 - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: p_partkey (type: int) sort order: + Map-reduce partition columns: p_partkey (type: int) - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE value expressions: p_size (type: int) TableScan alias: p1 - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: (p_size > 10) (type: boolean) - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Statistics: Num rows: 8 Data size: 968 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: p_partkey (type: int) sort order: + Map-reduce partition columns: p_partkey (type: int) - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Statistics: Num rows: 8 Data size: 968 Basic stats: COMPLETE Column stats: NONE value expressions: p_size (type: int) Reduce Operator Tree: Join Operator @@ -83,14 +55,14 @@ 1 {VALUE._col4} 2 outputColumnNames: _col5, _col17 - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Statistics: Num rows: 57 Data size: 6923 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col5 (type: int), _col17 (type: int) outputColumnNames: _col0, _col1 - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Statistics: Num rows: 57 Data size: 6923 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Statistics: Num rows: 57 Data size: 6923 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -122,32 +94,32 @@ Map Operator Tree: TableScan alias: p3 - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: p_partkey (type: int) sort order: + Map-reduce partition columns: p_partkey (type: int) - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE TableScan alias: p2 - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: p_partkey (type: int) sort order: + Map-reduce partition columns: p_partkey (type: int) - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE value expressions: p_size (type: int) TableScan alias: p1 - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: (p_size > 10) (type: 
boolean) - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Statistics: Num rows: 8 Data size: 968 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: p_partkey (type: int) sort order: + Map-reduce partition columns: p_partkey (type: int) - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Statistics: Num rows: 8 Data size: 968 Basic stats: COMPLETE Column stats: NONE value expressions: p_size (type: int) Reduce Operator Tree: Join Operator @@ -159,17 +131,17 @@ 1 {VALUE._col4} 2 outputColumnNames: _col5, _col17 - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Statistics: Num rows: 57 Data size: 6923 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: (_col5 > (_col17 + 10)) (type: boolean) - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Statistics: Num rows: 19 Data size: 2307 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col5 (type: int), _col17 (type: int) outputColumnNames: _col0, _col1 - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Statistics: Num rows: 19 Data size: 2307 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Statistics: Num rows: 19 Data size: 2307 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat Index: ql/src/test/results/clientpositive/lateral_view.q.out =================================================================== --- ql/src/test/results/clientpositive/lateral_view.q.out (revision 1637277) +++ ql/src/test/results/clientpositive/lateral_view.q.out (working copy) @@ -132,14 +132,14 @@ Lateral View Forward Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - Statistics: Num rows: 500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 134000 Basic stats: COMPLETE Column stats: COMPLETE Lateral View Join Operator outputColumnNames: _col5 - Statistics: Num rows: 1000 Data size: 28000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 162000 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col5 (type: int) outputColumnNames: _col0 - Statistics: Num rows: 1000 Data size: 28000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 162000 Basic stats: COMPLETE Column stats: COMPLETE Limit Number of rows: 3 Statistics: Num rows: 3 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE @@ -159,11 +159,11 @@ function name: explode Lateral View Join Operator outputColumnNames: _col5 - Statistics: Num rows: 1000 Data size: 28000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 162000 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col5 (type: int) outputColumnNames: _col0 - Statistics: Num rows: 1000 Data size: 28000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 162000 Basic stats: COMPLETE Column stats: COMPLETE Limit Number of rows: 3 Statistics: Num rows: 3 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE @@ -199,12 +199,12 @@ Lateral View Forward Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE Select 
Operator - Statistics: Num rows: 500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 134000 Basic stats: COMPLETE Column stats: COMPLETE Lateral View Join Operator outputColumnNames: _col5 - Statistics: Num rows: 1000 Data size: 28000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 162000 Basic stats: COMPLETE Column stats: COMPLETE Lateral View Forward - Statistics: Num rows: 1000 Data size: 28000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 162000 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col5 (type: int) outputColumnNames: _col5 @@ -259,9 +259,9 @@ function name: explode Lateral View Join Operator outputColumnNames: _col5 - Statistics: Num rows: 1000 Data size: 28000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 162000 Basic stats: COMPLETE Column stats: COMPLETE Lateral View Forward - Statistics: Num rows: 1000 Data size: 28000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 162000 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col5 (type: int) outputColumnNames: _col5 @@ -332,12 +332,12 @@ Lateral View Forward Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - Statistics: Num rows: 500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 134000 Basic stats: COMPLETE Column stats: COMPLETE Lateral View Join Operator outputColumnNames: _col5 - Statistics: Num rows: 1000 Data size: 24000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 158000 Basic stats: COMPLETE Column stats: COMPLETE Lateral View Forward - Statistics: Num rows: 1000 Data size: 24000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 158000 Basic stats: COMPLETE Column stats: COMPLETE Select Operator Statistics: Num rows: 1000 Data size: 268000 Basic stats: COMPLETE Column stats: COMPLETE Lateral View Join Operator @@ -390,9 +390,9 @@ function name: explode Lateral View Join Operator outputColumnNames: _col5 - Statistics: Num rows: 1000 Data size: 24000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 158000 Basic stats: COMPLETE Column stats: COMPLETE Lateral View Forward - Statistics: Num rows: 1000 Data size: 24000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 158000 Basic stats: COMPLETE Column stats: COMPLETE Select Operator Statistics: Num rows: 1000 Data size: 268000 Basic stats: COMPLETE Column stats: COMPLETE Lateral View Join Operator @@ -519,10 +519,10 @@ Lateral View Forward Statistics: Num rows: 500 Data size: 1406 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - Statistics: Num rows: 500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 134000 Basic stats: COMPLETE Column stats: COMPLETE Lateral View Join Operator outputColumnNames: _col4 - Statistics: Num rows: 1000 Data size: 28000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 162000 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col4 (type: int) outputColumnNames: _col0 @@ -546,7 +546,7 @@ function name: explode Lateral View Join Operator outputColumnNames: _col4 - Statistics: Num rows: 1000 Data size: 28000 Basic stats: COMPLETE 
Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 162000 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col4 (type: int) outputColumnNames: _col0 Index: ql/src/test/results/clientpositive/lateral_view_noalias.q.out =================================================================== --- ql/src/test/results/clientpositive/lateral_view_noalias.q.out (revision 1637277) +++ ql/src/test/results/clientpositive/lateral_view_noalias.q.out (working copy) @@ -18,14 +18,14 @@ Lateral View Forward Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - Statistics: Num rows: 500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 134000 Basic stats: COMPLETE Column stats: COMPLETE Lateral View Join Operator outputColumnNames: _col5, _col6 - Statistics: Num rows: 1000 Data size: 192000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 326000 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col5 (type: string), _col6 (type: int) outputColumnNames: _col0, _col1 - Statistics: Num rows: 1000 Data size: 192000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 326000 Basic stats: COMPLETE Column stats: COMPLETE Limit Number of rows: 2 Statistics: Num rows: 2 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE @@ -45,11 +45,11 @@ function name: explode Lateral View Join Operator outputColumnNames: _col5, _col6 - Statistics: Num rows: 1000 Data size: 192000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 326000 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col5 (type: string), _col6 (type: int) outputColumnNames: _col0, _col1 - Statistics: Num rows: 1000 Data size: 192000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 326000 Basic stats: COMPLETE Column stats: COMPLETE Limit Number of rows: 2 Statistics: Num rows: 2 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE @@ -158,10 +158,10 @@ Lateral View Forward Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - Statistics: Num rows: 500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 134000 Basic stats: COMPLETE Column stats: COMPLETE Lateral View Join Operator outputColumnNames: _col5, _col6 - Statistics: Num rows: 1000 Data size: 192000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 326000 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col5 (type: string), _col6 (type: int) outputColumnNames: _col0, _col1 @@ -182,7 +182,7 @@ function name: explode Lateral View Join Operator outputColumnNames: _col5, _col6 - Statistics: Num rows: 1000 Data size: 192000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 326000 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col5 (type: string), _col6 (type: int) outputColumnNames: _col0, _col1 @@ -259,10 +259,10 @@ Lateral View Forward Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - Statistics: Num rows: 500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 134000 Basic stats: COMPLETE Column stats: COMPLETE Lateral View Join Operator outputColumnNames: _col5, _col6 - 
Statistics: Num rows: 1000 Data size: 192000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 326000 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col5 (type: string), _col6 (type: int) outputColumnNames: _col0, _col1 @@ -283,7 +283,7 @@ function name: explode Lateral View Join Operator outputColumnNames: _col5, _col6 - Statistics: Num rows: 1000 Data size: 192000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 326000 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col5 (type: string), _col6 (type: int) outputColumnNames: _col0, _col1 Index: ql/src/test/results/clientpositive/lateral_view_ppd.q.out =================================================================== --- ql/src/test/results/clientpositive/lateral_view_ppd.q.out (revision 1637277) +++ ql/src/test/results/clientpositive/lateral_view_ppd.q.out (working copy) @@ -175,23 +175,44 @@ Map Operator Tree: TableScan alias: srcpart - Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE - Filter Operator - predicate: ((ds = '2008-04-08') and (hr = '12')) (type: boolean) - Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE - Lateral View Forward - Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: value (type: string) - outputColumnNames: value - Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Lateral View Forward + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: value (type: string) + outputColumnNames: value + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Lateral View Join Operator + outputColumnNames: _col1, _col7 + Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col1 (type: string), _col7 (type: int) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE + Limit + Number of rows: 12 + Statistics: Num rows: 12 Data size: 120 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 12 Data size: 120 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Select Operator + expressions: array(1,2,3) (type: array) + outputColumnNames: _col0 + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + UDTF Operator + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + function name: explode Lateral View Join Operator outputColumnNames: _col1, _col7 - Statistics: Num rows: 4000 Data size: 42496 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col1 (type: string), _col7 (type: int) outputColumnNames: _col0, _col1 - Statistics: Num rows: 4000 Data size: 42496 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE Limit Number of rows: 12 Statistics: Num 
rows: 12 Data size: 120 Basic stats: COMPLETE Column stats: NONE @@ -202,30 +223,6 @@ input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - Select Operator - expressions: array(1,2,3) (type: array) - outputColumnNames: _col0 - Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE - UDTF Operator - Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE - function name: explode - Lateral View Join Operator - outputColumnNames: _col1, _col7 - Statistics: Num rows: 4000 Data size: 42496 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: _col1 (type: string), _col7 (type: int) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 4000 Data size: 42496 Basic stats: COMPLETE Column stats: NONE - Limit - Number of rows: 12 - Statistics: Num rows: 12 Data size: 120 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - Statistics: Num rows: 12 Data size: 120 Basic stats: COMPLETE Column stats: NONE - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe Stage: Stage-0 Fetch Operator @@ -236,18 +233,12 @@ PREHOOK: query: SELECT value, myCol FROM (SELECT * FROM srcpart LATERAL VIEW explode(array(1,2,3)) myTable AS myCol) a WHERE ds='2008-04-08' AND hr="12" LIMIT 12 PREHOOK: type: QUERY PREHOOK: Input: default@srcpart -PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 -PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 -PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 #### A masked pattern was here #### POSTHOOK: query: SELECT value, myCol FROM (SELECT * FROM srcpart LATERAL VIEW explode(array(1,2,3)) myTable AS myCol) a WHERE ds='2008-04-08' AND hr="12" LIMIT 12 POSTHOOK: type: QUERY POSTHOOK: Input: default@srcpart -POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 -POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 -POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 #### A masked pattern was here #### val_238 1 val_238 2 Index: ql/src/test/results/clientpositive/leadlag.q.out =================================================================== --- ql/src/test/results/clientpositive/leadlag.q.out (revision 1637277) +++ ql/src/test/results/clientpositive/leadlag.q.out (working copy) @@ -1,45 +1,3 @@ -PREHOOK: query: DROP TABLE part -PREHOOK: type: DROPTABLE -POSTHOOK: query: DROP TABLE part -POSTHOOK: type: DROPTABLE -PREHOOK: query: -- data setup -CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@part -POSTHOOK: query: -- data setup -CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@part -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part -PREHOOK: type: LOAD -#### A masked pattern was here #### 
-PREHOOK: Output: default@part -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part -POSTHOOK: type: LOAD -#### A masked pattern was here #### -POSTHOOK: Output: default@part PREHOOK: query: --1. testLagWithPTFWindowing select p_mfgr, p_name, rank() over (partition by p_mfgr order by p_name) as r, Index: ql/src/test/results/clientpositive/leadlag_queries.q.out =================================================================== --- ql/src/test/results/clientpositive/leadlag_queries.q.out (revision 1637277) +++ ql/src/test/results/clientpositive/leadlag_queries.q.out (working copy) @@ -1,41 +1,3 @@ -PREHOOK: query: -- data setup -CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@part -POSTHOOK: query: -- data setup -CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@part -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part -PREHOOK: type: LOAD -#### A masked pattern was here #### -PREHOOK: Output: default@part -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part -POSTHOOK: type: LOAD -#### A masked pattern was here #### -POSTHOOK: Output: default@part PREHOOK: query: -- 1. testLeadUDAF select p_mfgr, p_retailprice, lead(p_retailprice) over (partition by p_mfgr order by p_name) as l1, Index: ql/src/test/results/clientpositive/load_dyn_part14_win.q.out =================================================================== --- ql/src/test/results/clientpositive/load_dyn_part14_win.q.out (revision 1637277) +++ ql/src/test/results/clientpositive/load_dyn_part14_win.q.out (working copy) @@ -5,6 +5,8 @@ create table if not exists nzhang_part14 (key string) partitioned by (value string) PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@nzhang_part14 POSTHOOK: query: -- INCLUDE_OS_WINDOWS -- included only on windows because of difference in file name encoding logic @@ -12,18 +14,21 @@ create table if not exists nzhang_part14 (key string) partitioned by (value string) POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default POSTHOOK: Output: default@nzhang_part14 PREHOOK: query: describe extended nzhang_part14 PREHOOK: type: DESCTABLE +PREHOOK: Input: default@nzhang_part14 POSTHOOK: query: describe extended nzhang_part14 POSTHOOK: type: DESCTABLE -key string None -value string None +POSTHOOK: Input: default@nzhang_part14 +key string +value string # Partition Information # col_name data_type comment -value string None +value string #### A masked pattern was here #### PREHOOK: query: explain @@ -46,9 +51,6 @@ select 'k3' as key, ' ' as value from src limit 2 ) T POSTHOOK: type: QUERY -ABSTRACT SYNTAX TREE: - (TOK_QUERY (TOK_FROM (TOK_SUBQUERY (TOK_UNION (TOK_UNION (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR 'k1' key) (TOK_SELEXPR (TOK_FUNCTION TOK_STRING TOK_NULL) value)) (TOK_LIMIT 2))) (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) 
(TOK_SELECT (TOK_SELEXPR 'k2' key) (TOK_SELEXPR '' value)) (TOK_LIMIT 2)))) (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR 'k3' key) (TOK_SELEXPR ' ' value)) (TOK_LIMIT 2)))) T)) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME nzhang_part14) (TOK_PARTSPEC (TOK_PARTVAL value)))) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key)) (TOK_SELEXPR (TOK_TABLE_OR_COL value))))) - STAGE DEPENDENCIES: Stage-1 is a root stage Stage-2 depends on stages: Stage-1, Stage-9, Stage-10 @@ -65,88 +67,79 @@ STAGE PLANS: Stage: Stage-1 Map Reduce - Alias -> Map Operator Tree: - null-subquery1-subquery2:t-subquery1-subquery2:src + Map Operator Tree: TableScan alias: src + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: - expr: 'k2' - type: string - expr: '' - type: string + expressions: 'k2' (type: string), '' (type: string) outputColumnNames: _col0, _col1 + Statistics: Num rows: 500 Data size: 85000 Basic stats: COMPLETE Column stats: COMPLETE Limit + Number of rows: 2 + Statistics: Num rows: 2 Data size: 340 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator sort order: - tag: -1 - value expressions: - expr: _col0 - type: string - expr: _col1 - type: string + Statistics: Num rows: 2 Data size: 340 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col0 (type: string), _col1 (type: string) Reduce Operator Tree: - Extract + Select Operator + expressions: VALUE._col0 (type: string), VALUE._col1 (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 2 Data size: 340 Basic stats: COMPLETE Column stats: COMPLETE Limit + Number of rows: 2 + Statistics: Num rows: 2 Data size: 340 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - GlobalTableId: 0 table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe Stage: Stage-2 Map Reduce - Alias -> Map Operator Tree: -#### A masked pattern was here #### + Map Operator Tree: TableScan Union + Statistics: Num rows: 6 Data size: 1022 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: - expr: _col0 - type: string - expr: _col1 - type: string + expressions: _col0 (type: string), _col1 (type: string) outputColumnNames: _col0, _col1 + Statistics: Num rows: 6 Data size: 1026 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - GlobalTableId: 1 + Statistics: Num rows: 6 Data size: 1026 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe name: default.nzhang_part14 -#### A masked pattern was here #### TableScan Union + Statistics: Num rows: 6 Data size: 1022 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: - expr: _col0 - type: string - expr: _col1 - type: string + expressions: _col0 (type: string), _col1 (type: string) outputColumnNames: _col0, _col1 + Statistics: Num rows: 6 Data size: 1026 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - GlobalTableId: 1 + Statistics: Num rows: 6 Data size: 1026 Basic stats: COMPLETE Column stats: COMPLETE table: input format: 
org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe name: default.nzhang_part14 -#### A masked pattern was here #### TableScan Union + Statistics: Num rows: 6 Data size: 1022 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: - expr: _col0 - type: string - expr: _col1 - type: string + expressions: _col0 (type: string), _col1 (type: string) outputColumnNames: _col0, _col1 + Statistics: Num rows: 6 Data size: 1026 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - GlobalTableId: 1 + Statistics: Num rows: 6 Data size: 1026 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -179,11 +172,10 @@ Stage: Stage-4 Map Reduce - Alias -> Map Operator Tree: -#### A masked pattern was here #### + Map Operator Tree: + TableScan File Output Operator compressed: false - GlobalTableId: 0 table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -192,11 +184,10 @@ Stage: Stage-6 Map Reduce - Alias -> Map Operator Tree: -#### A masked pattern was here #### + Map Operator Tree: + TableScan File Output Operator compressed: false - GlobalTableId: 0 table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -211,69 +202,68 @@ Stage: Stage-9 Map Reduce - Alias -> Map Operator Tree: - null-subquery2:t-subquery2:src + Map Operator Tree: TableScan alias: src + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: - expr: 'k3' - type: string - expr: ' ' - type: string + expressions: 'k3' (type: string), ' ' (type: string) outputColumnNames: _col0, _col1 + Statistics: Num rows: 500 Data size: 85500 Basic stats: COMPLETE Column stats: COMPLETE Limit + Number of rows: 2 + Statistics: Num rows: 2 Data size: 342 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator sort order: - tag: -1 - value expressions: - expr: _col0 - type: string - expr: _col1 - type: string + Statistics: Num rows: 2 Data size: 342 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col0 (type: string), _col1 (type: string) Reduce Operator Tree: - Extract + Select Operator + expressions: VALUE._col0 (type: string), VALUE._col1 (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 2 Data size: 342 Basic stats: COMPLETE Column stats: COMPLETE Limit + Number of rows: 2 + Statistics: Num rows: 2 Data size: 342 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - GlobalTableId: 0 table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe Stage: Stage-10 Map Reduce - Alias -> Map Operator Tree: - null-subquery1-subquery1:t-subquery1-subquery1:src + Map Operator Tree: TableScan alias: src + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: - expr: 'k1' - type: string - expr: UDFToString(null) - type: string + expressions: 'k1' (type: string), UDFToString(null) (type: string) outputColumnNames: _col0, _col1 + Statistics: Num rows: 500 
Data size: 85000 Basic stats: COMPLETE Column stats: COMPLETE Limit + Number of rows: 2 + Statistics: Num rows: 2 Data size: 340 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator sort order: - tag: -1 - value expressions: - expr: _col0 - type: string - expr: _col1 - type: string + Statistics: Num rows: 2 Data size: 340 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col0 (type: string), _col1 (type: string) Reduce Operator Tree: - Extract + Select Operator + expressions: VALUE._col0 (type: string), VALUE._col1 (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 2 Data size: 340 Basic stats: COMPLETE Column stats: COMPLETE Limit + Number of rows: 2 + Statistics: Num rows: 2 Data size: 340 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - GlobalTableId: 0 table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - PREHOOK: query: insert overwrite table nzhang_part14 partition(value) select key, value from ( select 'k1' as key, cast(null as string) as value from src limit 2 @@ -295,37 +285,35 @@ ) T POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@nzhang_part14@value=%20 +POSTHOOK: Output: default@nzhang_part14@value=%2520 POSTHOOK: Output: default@nzhang_part14@value=__HIVE_DEFAULT_PARTITION__ -POSTHOOK: Lineage: nzhang_part14 PARTITION(value= ).key EXPRESSION [] +POSTHOOK: Lineage: nzhang_part14 PARTITION(value=%20).key EXPRESSION [] POSTHOOK: Lineage: nzhang_part14 PARTITION(value=__HIVE_DEFAULT_PARTITION__).key EXPRESSION [] PREHOOK: query: show partitions nzhang_part14 PREHOOK: type: SHOWPARTITIONS +PREHOOK: Input: default@nzhang_part14 POSTHOOK: query: show partitions nzhang_part14 POSTHOOK: type: SHOWPARTITIONS -POSTHOOK: Lineage: nzhang_part14 PARTITION(value= ).key EXPRESSION [] -POSTHOOK: Lineage: nzhang_part14 PARTITION(value=__HIVE_DEFAULT_PARTITION__).key EXPRESSION [] -value=%20 +POSTHOOK: Input: default@nzhang_part14 +value=%2520 value=__HIVE_DEFAULT_PARTITION__ PREHOOK: query: select * from nzhang_part14 where value <> 'a' order by key, value PREHOOK: type: QUERY PREHOOK: Input: default@nzhang_part14 -PREHOOK: Input: default@nzhang_part14@value=%20 +PREHOOK: Input: default@nzhang_part14@value=%2520 PREHOOK: Input: default@nzhang_part14@value=__HIVE_DEFAULT_PARTITION__ #### A masked pattern was here #### POSTHOOK: query: select * from nzhang_part14 where value <> 'a' order by key, value POSTHOOK: type: QUERY POSTHOOK: Input: default@nzhang_part14 -POSTHOOK: Input: default@nzhang_part14@value=%20 +POSTHOOK: Input: default@nzhang_part14@value=%2520 POSTHOOK: Input: default@nzhang_part14@value=__HIVE_DEFAULT_PARTITION__ #### A masked pattern was here #### -POSTHOOK: Lineage: nzhang_part14 PARTITION(value= ).key EXPRESSION [] -POSTHOOK: Lineage: nzhang_part14 PARTITION(value=__HIVE_DEFAULT_PARTITION__).key EXPRESSION [] k1 __HIVE_DEFAULT_PARTITION__ k1 __HIVE_DEFAULT_PARTITION__ k2 __HIVE_DEFAULT_PARTITION__ k2 __HIVE_DEFAULT_PARTITION__ -k3 -k3 +k3 %20 +k3 %20 Index: ql/src/test/results/clientpositive/orc_merge1.q.out =================================================================== --- ql/src/test/results/clientpositive/orc_merge1.q.out (revision 1637277) +++ ql/src/test/results/clientpositive/orc_merge1.q.out (working copy) @@ -110,48 +110,8 @@ POSTHOOK: Lineage: orcfile_merge1 PARTITION(ds=1,part=0).value 
SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] POSTHOOK: Lineage: orcfile_merge1 PARTITION(ds=1,part=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: orcfile_merge1 PARTITION(ds=1,part=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: DESC FORMATTED orcfile_merge1 partition (ds='1', part='0') -PREHOOK: type: DESCTABLE -PREHOOK: Input: default@orcfile_merge1 -POSTHOOK: query: DESC FORMATTED orcfile_merge1 partition (ds='1', part='0') -POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@orcfile_merge1 -# col_name data_type comment - -key int -value string - -# Partition Information -# col_name data_type comment - -ds string -part string - -# Detailed Partition Information -Partition Value: [1, 0] -Database: default -Table: orcfile_merge1 +Found 2 items #### A masked pattern was here #### -Protect Mode: None -#### A masked pattern was here #### -Partition Parameters: - COLUMN_STATS_ACCURATE true - numFiles 2 - numRows 242 - rawDataSize 22748 - totalSize 1747 -#### A masked pattern was here #### - -# Storage Information -SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde -InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat -OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat -Compressed: No -Num Buckets: -1 -Bucket Columns: [] -Sort Columns: [] -Storage Desc Params: - serialization.format 1 PREHOOK: query: -- auto-merge slow way EXPLAIN INSERT OVERWRITE TABLE orcfile_merge1b PARTITION (ds='1', part) @@ -266,48 +226,8 @@ POSTHOOK: Lineage: orcfile_merge1b PARTITION(ds=1,part=0).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] POSTHOOK: Lineage: orcfile_merge1b PARTITION(ds=1,part=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: orcfile_merge1b PARTITION(ds=1,part=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: DESC FORMATTED orcfile_merge1b partition (ds='1', part='0') -PREHOOK: type: DESCTABLE -PREHOOK: Input: default@orcfile_merge1b -POSTHOOK: query: DESC FORMATTED orcfile_merge1b partition (ds='1', part='0') -POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@orcfile_merge1b -# col_name data_type comment - -key int -value string - -# Partition Information -# col_name data_type comment - -ds string -part string - -# Detailed Partition Information -Partition Value: [1, 0] -Database: default -Table: orcfile_merge1b +Found 1 items #### A masked pattern was here #### -Protect Mode: None -#### A masked pattern was here #### -Partition Parameters: - COLUMN_STATS_ACCURATE true - numFiles 1 - numRows 242 - rawDataSize 22748 - totalSize 1332 -#### A masked pattern was here #### - -# Storage Information -SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde -InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat -OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat -Compressed: No -Num Buckets: -1 -Bucket Columns: [] -Sort Columns: [] -Storage Desc Params: - serialization.format 1 PREHOOK: query: -- auto-merge fast way EXPLAIN INSERT OVERWRITE TABLE orcfile_merge1c PARTITION (ds='1', part) @@ -412,48 +332,8 @@ POSTHOOK: Lineage: orcfile_merge1c PARTITION(ds=1,part=0).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] POSTHOOK: Lineage: orcfile_merge1c PARTITION(ds=1,part=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: 
Lineage: orcfile_merge1c PARTITION(ds=1,part=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: DESC FORMATTED orcfile_merge1c partition (ds='1', part='0') -PREHOOK: type: DESCTABLE -PREHOOK: Input: default@orcfile_merge1c -POSTHOOK: query: DESC FORMATTED orcfile_merge1c partition (ds='1', part='0') -POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@orcfile_merge1c -# col_name data_type comment - -key int -value string - -# Partition Information -# col_name data_type comment - -ds string -part string - -# Detailed Partition Information -Partition Value: [1, 0] -Database: default -Table: orcfile_merge1c +Found 1 items #### A masked pattern was here #### -Protect Mode: None -#### A masked pattern was here #### -Partition Parameters: - COLUMN_STATS_ACCURATE true - numFiles 1 - numRows 242 - rawDataSize 22748 - totalSize 1623 -#### A masked pattern was here #### - -# Storage Information -SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde -InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat -OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat -Compressed: No -Num Buckets: -1 -Bucket Columns: [] -Sort Columns: [] -Storage Desc Params: - serialization.format 1 PREHOOK: query: -- Verify SELECT SUM(HASH(c)) FROM ( SELECT TRANSFORM(*) USING 'tr \t _' AS (c) Index: ql/src/test/results/clientpositive/orc_merge2.q.out =================================================================== --- ql/src/test/results/clientpositive/orc_merge2.q.out (revision 1637277) +++ ql/src/test/results/clientpositive/orc_merge2.q.out (working copy) @@ -173,49 +173,8 @@ POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=9,three=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=9,three=7).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=9,three=7).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: DESC FORMATTED orcfile_merge2a partition (one='1', two='0', three='2') -PREHOOK: type: DESCTABLE -PREHOOK: Input: default@orcfile_merge2a -POSTHOOK: query: DESC FORMATTED orcfile_merge2a partition (one='1', two='0', three='2') -POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@orcfile_merge2a -# col_name data_type comment - -key int -value string - -# Partition Information -# col_name data_type comment - -one string -two string -three string - -# Detailed Partition Information -Partition Value: [1, 0, 2] -Database: default -Table: orcfile_merge2a +Found 1 items #### A masked pattern was here #### -Protect Mode: None -#### A masked pattern was here #### -Partition Parameters: - COLUMN_STATS_ACCURATE true - numFiles 1 - numRows 4 - rawDataSize 376 - totalSize 320 -#### A masked pattern was here #### - -# Storage Information -SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde -InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat -OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat -Compressed: No -Num Buckets: -1 -Bucket Columns: [] -Sort Columns: [] -Storage Desc Params: - serialization.format 1 PREHOOK: query: SELECT SUM(HASH(c)) FROM ( SELECT TRANSFORM(*) USING 'tr \t _' AS (c) FROM orcfile_merge2a Index: ql/src/test/results/clientpositive/orc_merge3.q.out =================================================================== --- ql/src/test/results/clientpositive/orc_merge3.q.out (revision 1637277) +++ 
ql/src/test/results/clientpositive/orc_merge3.q.out (working copy) @@ -142,42 +142,8 @@ POSTHOOK: Output: default@orcfile_merge3b POSTHOOK: Lineage: orcfile_merge3b.key SIMPLE [(orcfile_merge3a)orcfile_merge3a.FieldSchema(name:key, type:int, comment:null), ] POSTHOOK: Lineage: orcfile_merge3b.value SIMPLE [(orcfile_merge3a)orcfile_merge3a.FieldSchema(name:value, type:string, comment:null), ] -PREHOOK: query: DESC FORMATTED orcfile_merge3b -PREHOOK: type: DESCTABLE -PREHOOK: Input: default@orcfile_merge3b -POSTHOOK: query: DESC FORMATTED orcfile_merge3b -POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@orcfile_merge3b -# col_name data_type comment - -key int -value string - -# Detailed Table Information -Database: default +Found 1 items #### A masked pattern was here #### -Protect Mode: None -Retention: 0 -#### A masked pattern was here #### -Table Type: MANAGED_TABLE -Table Parameters: - COLUMN_STATS_ACCURATE true - numFiles 1 - numRows 1000 - rawDataSize 94000 - totalSize 4834 -#### A masked pattern was here #### - -# Storage Information -SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde -InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat -OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat -Compressed: No -Num Buckets: -1 -Bucket Columns: [] -Sort Columns: [] -Storage Desc Params: - serialization.format 1 PREHOOK: query: SELECT SUM(HASH(c)) FROM ( SELECT TRANSFORM(key, value) USING 'tr \t _' AS (c) FROM orcfile_merge3a Index: ql/src/test/results/clientpositive/orc_merge4.q.out =================================================================== --- ql/src/test/results/clientpositive/orc_merge4.q.out (revision 1637277) +++ ql/src/test/results/clientpositive/orc_merge4.q.out (working copy) @@ -36,47 +36,8 @@ POSTHOOK: Output: default@orcfile_merge3a@ds=1 POSTHOOK: Lineage: orcfile_merge3a PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: orcfile_merge3a PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: DESC FORMATTED orcfile_merge3a PARTITION (ds='1') -PREHOOK: type: DESCTABLE -PREHOOK: Input: default@orcfile_merge3a -POSTHOOK: query: DESC FORMATTED orcfile_merge3a PARTITION (ds='1') -POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@orcfile_merge3a -# col_name data_type comment - -key int -value string - -# Partition Information -# col_name data_type comment - -ds string - -# Detailed Partition Information -Partition Value: [1] -Database: default -Table: orcfile_merge3a +Found 1 items #### A masked pattern was here #### -Protect Mode: None -#### A masked pattern was here #### -Partition Parameters: - COLUMN_STATS_ACCURATE true - numFiles 1 - numRows 500 - rawDataSize 47000 - totalSize 2496 -#### A masked pattern was here #### - -# Storage Information -SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde -InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat -OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat -Compressed: No -Num Buckets: -1 -Bucket Columns: [] -Sort Columns: [] -Storage Desc Params: - serialization.format 1 PREHOOK: query: INSERT OVERWRITE TABLE orcfile_merge3a PARTITION (ds='1') SELECT * FROM src PREHOOK: type: QUERY @@ -101,88 +62,10 @@ POSTHOOK: Output: default@orcfile_merge3a@ds=2 POSTHOOK: Lineage: orcfile_merge3a PARTITION(ds=2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: orcfile_merge3a PARTITION(ds=2).value SIMPLE 
[(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: DESC FORMATTED orcfile_merge3a PARTITION (ds='1') -PREHOOK: type: DESCTABLE -PREHOOK: Input: default@orcfile_merge3a -POSTHOOK: query: DESC FORMATTED orcfile_merge3a PARTITION (ds='1') -POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@orcfile_merge3a -# col_name data_type comment - -key int -value string - -# Partition Information -# col_name data_type comment - -ds string - -# Detailed Partition Information -Partition Value: [1] -Database: default -Table: orcfile_merge3a +Found 1 items #### A masked pattern was here #### -Protect Mode: None +Found 1 items #### A masked pattern was here #### -Partition Parameters: - COLUMN_STATS_ACCURATE true - numFiles 1 - numRows 500 - rawDataSize 47000 - totalSize 2496 -#### A masked pattern was here #### - -# Storage Information -SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde -InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat -OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat -Compressed: No -Num Buckets: -1 -Bucket Columns: [] -Sort Columns: [] -Storage Desc Params: - serialization.format 1 -PREHOOK: query: DESC FORMATTED orcfile_merge3a PARTITION (ds='2') -PREHOOK: type: DESCTABLE -PREHOOK: Input: default@orcfile_merge3a -POSTHOOK: query: DESC FORMATTED orcfile_merge3a PARTITION (ds='2') -POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@orcfile_merge3a -# col_name data_type comment - -key int -value string - -# Partition Information -# col_name data_type comment - -ds string - -# Detailed Partition Information -Partition Value: [2] -Database: default -Table: orcfile_merge3a -#### A masked pattern was here #### -Protect Mode: None -#### A masked pattern was here #### -Partition Parameters: - COLUMN_STATS_ACCURATE true - numFiles 1 - numRows 500 - rawDataSize 47000 - totalSize 2496 -#### A masked pattern was here #### - -# Storage Information -SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde -InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat -OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat -Compressed: No -Num Buckets: -1 -Bucket Columns: [] -Sort Columns: [] -Storage Desc Params: - serialization.format 1 PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE orcfile_merge3b SELECT key, value FROM orcfile_merge3a PREHOOK: type: QUERY Index: ql/src/test/results/clientpositive/orc_merge5.q.out =================================================================== --- ql/src/test/results/clientpositive/orc_merge5.q.out (revision 1637277) +++ ql/src/test/results/clientpositive/orc_merge5.q.out (working copy) @@ -97,45 +97,8 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@orc_merge5b POSTHOOK: Output: default@orc_merge5b -PREHOOK: query: desc formatted orc_merge5b -PREHOOK: type: DESCTABLE -PREHOOK: Input: default@orc_merge5b -POSTHOOK: query: desc formatted orc_merge5b -POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@orc_merge5b -# col_name data_type comment - -userid bigint -string1 string -subtype double -decimal1 decimal(10,0) -ts timestamp - -# Detailed Table Information -Database: default +Found 3 items #### A masked pattern was here #### -Protect Mode: None -Retention: 0 -#### A masked pattern was here #### -Table Type: MANAGED_TABLE -Table Parameters: - COLUMN_STATS_ACCURATE true - numFiles 3 - numRows 3 - rawDataSize 765 - totalSize 1133 -#### A masked pattern was here #### - -# Storage Information -SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde -InputFormat: 
org.apache.hadoop.hive.ql.io.orc.OrcInputFormat -OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat -Compressed: No -Num Buckets: -1 -Bucket Columns: [] -Sort Columns: [] -Storage Desc Params: - serialization.format 1 PREHOOK: query: select * from orc_merge5b PREHOOK: type: QUERY PREHOOK: Input: default@orc_merge5b @@ -252,45 +215,8 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@orc_merge5b POSTHOOK: Output: default@orc_merge5b -PREHOOK: query: desc formatted orc_merge5b -PREHOOK: type: DESCTABLE -PREHOOK: Input: default@orc_merge5b -POSTHOOK: query: desc formatted orc_merge5b -POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@orc_merge5b -# col_name data_type comment - -userid bigint -string1 string -subtype double -decimal1 decimal(10,0) -ts timestamp - -# Detailed Table Information -Database: default +Found 1 items #### A masked pattern was here #### -Protect Mode: None -Retention: 0 -#### A masked pattern was here #### -Table Type: MANAGED_TABLE -Table Parameters: - COLUMN_STATS_ACCURATE true - numFiles 1 - numRows 3 - rawDataSize 765 - totalSize 899 -#### A masked pattern was here #### - -# Storage Information -SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde -InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat -OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat -Compressed: No -Num Buckets: -1 -Bucket Columns: [] -Sort Columns: [] -Storage Desc Params: - serialization.format 1 PREHOOK: query: select * from orc_merge5b PREHOOK: type: QUERY PREHOOK: Input: default@orc_merge5b @@ -323,45 +249,8 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@orc_merge5b POSTHOOK: Output: default@orc_merge5b -PREHOOK: query: desc formatted orc_merge5b -PREHOOK: type: DESCTABLE -PREHOOK: Input: default@orc_merge5b -POSTHOOK: query: desc formatted orc_merge5b -POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@orc_merge5b -# col_name data_type comment - -userid bigint -string1 string -subtype double -decimal1 decimal(10,0) -ts timestamp - -# Detailed Table Information -Database: default +Found 3 items #### A masked pattern was here #### -Protect Mode: None -Retention: 0 -#### A masked pattern was here #### -Table Type: MANAGED_TABLE -Table Parameters: - COLUMN_STATS_ACCURATE true - numFiles 3 - numRows 3 - rawDataSize 765 - totalSize 1133 -#### A masked pattern was here #### - -# Storage Information -SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde -InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat -OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat -Compressed: No -Num Buckets: -1 -Bucket Columns: [] -Sort Columns: [] -Storage Desc Params: - serialization.format 1 PREHOOK: query: select * from orc_merge5b PREHOOK: type: QUERY PREHOOK: Input: default@orc_merge5b @@ -416,45 +305,8 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@orc_merge5b POSTHOOK: Output: default@orc_merge5b -PREHOOK: query: desc formatted orc_merge5b -PREHOOK: type: DESCTABLE -PREHOOK: Input: default@orc_merge5b -POSTHOOK: query: desc formatted orc_merge5b -POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@orc_merge5b -# col_name data_type comment - -userid bigint -string1 string -subtype double -decimal1 decimal(10,0) -ts timestamp - -# Detailed Table Information -Database: default +Found 1 items #### A masked pattern was here #### -Protect Mode: None -Retention: 0 -#### A masked pattern was here #### -Table Type: MANAGED_TABLE -Table Parameters: - COLUMN_STATS_ACCURATE true - numFiles 1 - numRows 3 - rawDataSize 765 - totalSize 899 -#### A masked 
pattern was here #### - -# Storage Information -SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde -InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat -OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat -Compressed: No -Num Buckets: -1 -Bucket Columns: [] -Sort Columns: [] -Storage Desc Params: - serialization.format 1 PREHOOK: query: select * from orc_merge5b PREHOOK: type: QUERY PREHOOK: Input: default@orc_merge5b Index: ql/src/test/results/clientpositive/orc_merge6.q.out =================================================================== --- ql/src/test/results/clientpositive/orc_merge6.q.out (revision 1637277) +++ ql/src/test/results/clientpositive/orc_merge6.q.out (working copy) @@ -127,96 +127,10 @@ POSTHOOK: Input: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a@year=2001/hour=24 -PREHOOK: query: desc formatted orc_merge5a partition(year="2000",hour=24) -PREHOOK: type: DESCTABLE -PREHOOK: Input: default@orc_merge5a -POSTHOOK: query: desc formatted orc_merge5a partition(year="2000",hour=24) -POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@orc_merge5a -# col_name data_type comment - -userid bigint -string1 string -subtype double -decimal1 decimal(10,0) -ts timestamp - -# Partition Information -# col_name data_type comment - -year string -hour int - -# Detailed Partition Information -Partition Value: [2000, 24] -Database: default -Table: orc_merge5a +Found 3 items #### A masked pattern was here #### -Protect Mode: None +Found 3 items #### A masked pattern was here #### -Partition Parameters: - COLUMN_STATS_ACCURATE true - numFiles 3 - numRows 3 - rawDataSize 765 - totalSize 1133 -#### A masked pattern was here #### - -# Storage Information -SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde -InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat -OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat -Compressed: No -Num Buckets: -1 -Bucket Columns: [] -Sort Columns: [] -Storage Desc Params: - serialization.format 1 -PREHOOK: query: desc formatted orc_merge5a partition(year="2001",hour=24) -PREHOOK: type: DESCTABLE -PREHOOK: Input: default@orc_merge5a -POSTHOOK: query: desc formatted orc_merge5a partition(year="2001",hour=24) -POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@orc_merge5a -# col_name data_type comment - -userid bigint -string1 string -subtype double -decimal1 decimal(10,0) -ts timestamp - -# Partition Information -# col_name data_type comment - -year string -hour int - -# Detailed Partition Information -Partition Value: [2001, 24] -Database: default -Table: orc_merge5a -#### A masked pattern was here #### -Protect Mode: None -#### A masked pattern was here #### -Partition Parameters: - COLUMN_STATS_ACCURATE true - numFiles 3 - numRows 3 - rawDataSize 765 - totalSize 1133 -#### A masked pattern was here #### - -# Storage Information -SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde -InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat -OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat -Compressed: No -Num Buckets: -1 -Bucket Columns: [] -Sort Columns: [] -Storage Desc Params: - serialization.format 1 PREHOOK: query: show partitions orc_merge5a PREHOOK: type: SHOWPARTITIONS PREHOOK: Input: default@orc_merge5a @@ -376,96 +290,10 @@ POSTHOOK: Input: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a@year=2001/hour=24 -PREHOOK: query: desc formatted orc_merge5a partition(year="2000",hour=24) -PREHOOK: 
type: DESCTABLE -PREHOOK: Input: default@orc_merge5a -POSTHOOK: query: desc formatted orc_merge5a partition(year="2000",hour=24) -POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@orc_merge5a -# col_name data_type comment - -userid bigint -string1 string -subtype double -decimal1 decimal(10,0) -ts timestamp - -# Partition Information -# col_name data_type comment - -year string -hour int - -# Detailed Partition Information -Partition Value: [2000, 24] -Database: default -Table: orc_merge5a +Found 1 items #### A masked pattern was here #### -Protect Mode: None +Found 1 items #### A masked pattern was here #### -Partition Parameters: - COLUMN_STATS_ACCURATE true - numFiles 1 - numRows 3 - rawDataSize 765 - totalSize 899 -#### A masked pattern was here #### - -# Storage Information -SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde -InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat -OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat -Compressed: No -Num Buckets: -1 -Bucket Columns: [] -Sort Columns: [] -Storage Desc Params: - serialization.format 1 -PREHOOK: query: desc formatted orc_merge5a partition(year="2001",hour=24) -PREHOOK: type: DESCTABLE -PREHOOK: Input: default@orc_merge5a -POSTHOOK: query: desc formatted orc_merge5a partition(year="2001",hour=24) -POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@orc_merge5a -# col_name data_type comment - -userid bigint -string1 string -subtype double -decimal1 decimal(10,0) -ts timestamp - -# Partition Information -# col_name data_type comment - -year string -hour int - -# Detailed Partition Information -Partition Value: [2001, 24] -Database: default -Table: orc_merge5a -#### A masked pattern was here #### -Protect Mode: None -#### A masked pattern was here #### -Partition Parameters: - COLUMN_STATS_ACCURATE true - numFiles 1 - numRows 3 - rawDataSize 765 - totalSize 899 -#### A masked pattern was here #### - -# Storage Information -SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde -InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat -OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat -Compressed: No -Num Buckets: -1 -Bucket Columns: [] -Sort Columns: [] -Storage Desc Params: - serialization.format 1 PREHOOK: query: show partitions orc_merge5a PREHOOK: type: SHOWPARTITIONS PREHOOK: Input: default@orc_merge5a @@ -538,96 +366,10 @@ POSTHOOK: Input: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a@year=2001/hour=24 -PREHOOK: query: desc formatted orc_merge5a partition(year="2000",hour=24) -PREHOOK: type: DESCTABLE -PREHOOK: Input: default@orc_merge5a -POSTHOOK: query: desc formatted orc_merge5a partition(year="2000",hour=24) -POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@orc_merge5a -# col_name data_type comment - -userid bigint -string1 string -subtype double -decimal1 decimal(10,0) -ts timestamp - -# Partition Information -# col_name data_type comment - -year string -hour int - -# Detailed Partition Information -Partition Value: [2000, 24] -Database: default -Table: orc_merge5a +Found 3 items #### A masked pattern was here #### -Protect Mode: None +Found 3 items #### A masked pattern was here #### -Partition Parameters: - COLUMN_STATS_ACCURATE true - numFiles 3 - numRows 3 - rawDataSize 765 - totalSize 1133 -#### A masked pattern was here #### - -# Storage Information -SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde -InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat -OutputFormat: 
org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat -Compressed: No -Num Buckets: -1 -Bucket Columns: [] -Sort Columns: [] -Storage Desc Params: - serialization.format 1 -PREHOOK: query: desc formatted orc_merge5a partition(year="2001",hour=24) -PREHOOK: type: DESCTABLE -PREHOOK: Input: default@orc_merge5a -POSTHOOK: query: desc formatted orc_merge5a partition(year="2001",hour=24) -POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@orc_merge5a -# col_name data_type comment - -userid bigint -string1 string -subtype double -decimal1 decimal(10,0) -ts timestamp - -# Partition Information -# col_name data_type comment - -year string -hour int - -# Detailed Partition Information -Partition Value: [2001, 24] -Database: default -Table: orc_merge5a -#### A masked pattern was here #### -Protect Mode: None -#### A masked pattern was here #### -Partition Parameters: - COLUMN_STATS_ACCURATE true - numFiles 3 - numRows 3 - rawDataSize 765 - totalSize 1133 -#### A masked pattern was here #### - -# Storage Information -SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde -InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat -OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat -Compressed: No -Num Buckets: -1 -Bucket Columns: [] -Sort Columns: [] -Storage Desc Params: - serialization.format 1 PREHOOK: query: show partitions orc_merge5a PREHOOK: type: SHOWPARTITIONS PREHOOK: Input: default@orc_merge5a @@ -720,96 +462,10 @@ POSTHOOK: Input: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a@year=2001/hour=24 -PREHOOK: query: desc formatted orc_merge5a partition(year="2000",hour=24) -PREHOOK: type: DESCTABLE -PREHOOK: Input: default@orc_merge5a -POSTHOOK: query: desc formatted orc_merge5a partition(year="2000",hour=24) -POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@orc_merge5a -# col_name data_type comment - -userid bigint -string1 string -subtype double -decimal1 decimal(10,0) -ts timestamp - -# Partition Information -# col_name data_type comment - -year string -hour int - -# Detailed Partition Information -Partition Value: [2000, 24] -Database: default -Table: orc_merge5a +Found 1 items #### A masked pattern was here #### -Protect Mode: None +Found 1 items #### A masked pattern was here #### -Partition Parameters: - COLUMN_STATS_ACCURATE true - numFiles 1 - numRows 3 - rawDataSize 765 - totalSize 899 -#### A masked pattern was here #### - -# Storage Information -SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde -InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat -OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat -Compressed: No -Num Buckets: -1 -Bucket Columns: [] -Sort Columns: [] -Storage Desc Params: - serialization.format 1 -PREHOOK: query: desc formatted orc_merge5a partition(year="2001",hour=24) -PREHOOK: type: DESCTABLE -PREHOOK: Input: default@orc_merge5a -POSTHOOK: query: desc formatted orc_merge5a partition(year="2001",hour=24) -POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@orc_merge5a -# col_name data_type comment - -userid bigint -string1 string -subtype double -decimal1 decimal(10,0) -ts timestamp - -# Partition Information -# col_name data_type comment - -year string -hour int - -# Detailed Partition Information -Partition Value: [2001, 24] -Database: default -Table: orc_merge5a -#### A masked pattern was here #### -Protect Mode: None -#### A masked pattern was here #### -Partition Parameters: - COLUMN_STATS_ACCURATE true - numFiles 1 - numRows 3 - rawDataSize 765 - totalSize 899 -#### 
A masked pattern was here #### - -# Storage Information -SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde -InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat -OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat -Compressed: No -Num Buckets: -1 -Bucket Columns: [] -Sort Columns: [] -Storage Desc Params: - serialization.format 1 PREHOOK: query: show partitions orc_merge5a PREHOOK: type: SHOWPARTITIONS PREHOOK: Input: default@orc_merge5a Index: ql/src/test/results/clientpositive/orc_merge7.q.out =================================================================== --- ql/src/test/results/clientpositive/orc_merge7.q.out (revision 1637277) +++ ql/src/test/results/clientpositive/orc_merge7.q.out (working copy) @@ -160,94 +160,10 @@ POSTHOOK: Input: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a@st=0.8 -PREHOOK: query: desc formatted orc_merge5a partition(st=80.0) -PREHOOK: type: DESCTABLE -PREHOOK: Input: default@orc_merge5a -POSTHOOK: query: desc formatted orc_merge5a partition(st=80.0) -POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@orc_merge5a -# col_name data_type comment - -userid bigint -string1 string -subtype double -decimal1 decimal(10,0) -ts timestamp - -# Partition Information -# col_name data_type comment - -st double - -# Detailed Partition Information -Partition Value: [80.0] -Database: default -Table: orc_merge5a +Found 1 items #### A masked pattern was here #### -Protect Mode: None +Found 2 items #### A masked pattern was here #### -Partition Parameters: - COLUMN_STATS_ACCURATE true - numFiles 1 - numRows 1 - rawDataSize 255 - totalSize 513 -#### A masked pattern was here #### - -# Storage Information -SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde -InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat -OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat -Compressed: No -Num Buckets: -1 -Bucket Columns: [] -Sort Columns: [] -Storage Desc Params: - serialization.format 1 -PREHOOK: query: desc formatted orc_merge5a partition(st=0.8) -PREHOOK: type: DESCTABLE -PREHOOK: Input: default@orc_merge5a -POSTHOOK: query: desc formatted orc_merge5a partition(st=0.8) -POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@orc_merge5a -# col_name data_type comment - -userid bigint -string1 string -subtype double -decimal1 decimal(10,0) -ts timestamp - -# Partition Information -# col_name data_type comment - -st double - -# Detailed Partition Information -Partition Value: [0.8] -Database: default -Table: orc_merge5a -#### A masked pattern was here #### -Protect Mode: None -#### A masked pattern was here #### -Partition Parameters: - COLUMN_STATS_ACCURATE true - numFiles 2 - numRows 2 - rawDataSize 510 - totalSize 1044 -#### A masked pattern was here #### - -# Storage Information -SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde -InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat -OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat -Compressed: No -Num Buckets: -1 -Bucket Columns: [] -Sort Columns: [] -Storage Desc Params: - serialization.format 1 PREHOOK: query: show partitions orc_merge5a PREHOOK: type: SHOWPARTITIONS PREHOOK: Input: default@orc_merge5a @@ -441,94 +357,10 @@ POSTHOOK: Input: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a@st=0.8 -PREHOOK: query: desc formatted orc_merge5a partition(st=80.0) -PREHOOK: type: DESCTABLE -PREHOOK: Input: default@orc_merge5a -POSTHOOK: query: desc formatted orc_merge5a 
partition(st=80.0) -POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@orc_merge5a -# col_name data_type comment - -userid bigint -string1 string -subtype double -decimal1 decimal(10,0) -ts timestamp - -# Partition Information -# col_name data_type comment - -st double - -# Detailed Partition Information -Partition Value: [80.0] -Database: default -Table: orc_merge5a +Found 1 items #### A masked pattern was here #### -Protect Mode: None +Found 1 items #### A masked pattern was here #### -Partition Parameters: - COLUMN_STATS_ACCURATE true - numFiles 1 - numRows 1 - rawDataSize 255 - totalSize 513 -#### A masked pattern was here #### - -# Storage Information -SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde -InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat -OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat -Compressed: No -Num Buckets: -1 -Bucket Columns: [] -Sort Columns: [] -Storage Desc Params: - serialization.format 1 -PREHOOK: query: desc formatted orc_merge5a partition(st=0.8) -PREHOOK: type: DESCTABLE -PREHOOK: Input: default@orc_merge5a -POSTHOOK: query: desc formatted orc_merge5a partition(st=0.8) -POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@orc_merge5a -# col_name data_type comment - -userid bigint -string1 string -subtype double -decimal1 decimal(10,0) -ts timestamp - -# Partition Information -# col_name data_type comment - -st double - -# Detailed Partition Information -Partition Value: [0.8] -Database: default -Table: orc_merge5a -#### A masked pattern was here #### -Protect Mode: None -#### A masked pattern was here #### -Partition Parameters: - COLUMN_STATS_ACCURATE true - numFiles 1 - numRows 2 - rawDataSize 510 - totalSize 838 -#### A masked pattern was here #### - -# Storage Information -SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde -InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat -OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat -Compressed: No -Num Buckets: -1 -Bucket Columns: [] -Sort Columns: [] -Storage Desc Params: - serialization.format 1 PREHOOK: query: show partitions orc_merge5a PREHOOK: type: SHOWPARTITIONS PREHOOK: Input: default@orc_merge5a @@ -640,94 +472,10 @@ POSTHOOK: Input: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a@st=0.8 -PREHOOK: query: desc formatted orc_merge5a partition(st=80.0) -PREHOOK: type: DESCTABLE -PREHOOK: Input: default@orc_merge5a -POSTHOOK: query: desc formatted orc_merge5a partition(st=80.0) -POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@orc_merge5a -# col_name data_type comment - -userid bigint -string1 string -subtype double -decimal1 decimal(10,0) -ts timestamp - -# Partition Information -# col_name data_type comment - -st double - -# Detailed Partition Information -Partition Value: [80.0] -Database: default -Table: orc_merge5a +Found 1 items #### A masked pattern was here #### -Protect Mode: None +Found 2 items #### A masked pattern was here #### -Partition Parameters: - COLUMN_STATS_ACCURATE true - numFiles 1 - numRows 1 - rawDataSize 255 - totalSize 513 -#### A masked pattern was here #### - -# Storage Information -SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde -InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat -OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat -Compressed: No -Num Buckets: -1 -Bucket Columns: [] -Sort Columns: [] -Storage Desc Params: - serialization.format 1 -PREHOOK: query: desc formatted orc_merge5a partition(st=0.8) -PREHOOK: type: DESCTABLE 
-PREHOOK: Input: default@orc_merge5a -POSTHOOK: query: desc formatted orc_merge5a partition(st=0.8) -POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@orc_merge5a -# col_name data_type comment - -userid bigint -string1 string -subtype double -decimal1 decimal(10,0) -ts timestamp - -# Partition Information -# col_name data_type comment - -st double - -# Detailed Partition Information -Partition Value: [0.8] -Database: default -Table: orc_merge5a -#### A masked pattern was here #### -Protect Mode: None -#### A masked pattern was here #### -Partition Parameters: - COLUMN_STATS_ACCURATE true - numFiles 2 - numRows 2 - rawDataSize 510 - totalSize 1044 -#### A masked pattern was here #### - -# Storage Information -SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde -InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat -OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat -Compressed: No -Num Buckets: -1 -Bucket Columns: [] -Sort Columns: [] -Storage Desc Params: - serialization.format 1 PREHOOK: query: show partitions orc_merge5a PREHOOK: type: SHOWPARTITIONS PREHOOK: Input: default@orc_merge5a @@ -822,94 +570,10 @@ POSTHOOK: Input: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a@st=0.8 -PREHOOK: query: desc formatted orc_merge5a partition(st=80.0) -PREHOOK: type: DESCTABLE -PREHOOK: Input: default@orc_merge5a -POSTHOOK: query: desc formatted orc_merge5a partition(st=80.0) -POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@orc_merge5a -# col_name data_type comment - -userid bigint -string1 string -subtype double -decimal1 decimal(10,0) -ts timestamp - -# Partition Information -# col_name data_type comment - -st double - -# Detailed Partition Information -Partition Value: [80.0] -Database: default -Table: orc_merge5a +Found 1 items #### A masked pattern was here #### -Protect Mode: None +Found 1 items #### A masked pattern was here #### -Partition Parameters: - COLUMN_STATS_ACCURATE true - numFiles 1 - numRows 1 - rawDataSize 255 - totalSize 513 -#### A masked pattern was here #### - -# Storage Information -SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde -InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat -OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat -Compressed: No -Num Buckets: -1 -Bucket Columns: [] -Sort Columns: [] -Storage Desc Params: - serialization.format 1 -PREHOOK: query: desc formatted orc_merge5a partition(st=0.8) -PREHOOK: type: DESCTABLE -PREHOOK: Input: default@orc_merge5a -POSTHOOK: query: desc formatted orc_merge5a partition(st=0.8) -POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@orc_merge5a -# col_name data_type comment - -userid bigint -string1 string -subtype double -decimal1 decimal(10,0) -ts timestamp - -# Partition Information -# col_name data_type comment - -st double - -# Detailed Partition Information -Partition Value: [0.8] -Database: default -Table: orc_merge5a -#### A masked pattern was here #### -Protect Mode: None -#### A masked pattern was here #### -Partition Parameters: - COLUMN_STATS_ACCURATE true - numFiles 1 - numRows 2 - rawDataSize 510 - totalSize 838 -#### A masked pattern was here #### - -# Storage Information -SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde -InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat -OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat -Compressed: No -Num Buckets: -1 -Bucket Columns: [] -Sort Columns: [] -Storage Desc Params: - serialization.format 1 PREHOOK: query: show partitions 
orc_merge5a PREHOOK: type: SHOWPARTITIONS PREHOOK: Input: default@orc_merge5a Index: ql/src/test/results/clientpositive/orc_merge_incompat1.q.out =================================================================== --- ql/src/test/results/clientpositive/orc_merge_incompat1.q.out (revision 1637277) +++ ql/src/test/results/clientpositive/orc_merge_incompat1.q.out (working copy) @@ -148,45 +148,8 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@orc_merge5b POSTHOOK: Output: default@orc_merge5b -PREHOOK: query: desc formatted orc_merge5b -PREHOOK: type: DESCTABLE -PREHOOK: Input: default@orc_merge5b -POSTHOOK: query: desc formatted orc_merge5b -POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@orc_merge5b -# col_name data_type comment - -userid bigint -string1 string -subtype double -decimal1 decimal(10,0) -ts timestamp - -# Detailed Table Information -Database: default +Found 5 items #### A masked pattern was here #### -Protect Mode: None -Retention: 0 -#### A masked pattern was here #### -Table Type: MANAGED_TABLE -Table Parameters: - COLUMN_STATS_ACCURATE true - numFiles 5 - numRows 15 - rawDataSize 3825 - totalSize 2877 -#### A masked pattern was here #### - -# Storage Information -SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde -InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat -OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat -Compressed: No -Num Buckets: -1 -Bucket Columns: [] -Sort Columns: [] -Storage Desc Params: - serialization.format 1 PREHOOK: query: select * from orc_merge5b PREHOOK: type: QUERY PREHOOK: Input: default@orc_merge5b @@ -228,45 +191,8 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@orc_merge5b POSTHOOK: Output: default@orc_merge5b -PREHOOK: query: desc formatted orc_merge5b -PREHOOK: type: DESCTABLE -PREHOOK: Input: default@orc_merge5b -POSTHOOK: query: desc formatted orc_merge5b -POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@orc_merge5b -# col_name data_type comment - -userid bigint -string1 string -subtype double -decimal1 decimal(10,0) -ts timestamp - -# Detailed Table Information -Database: default +Found 3 items #### A masked pattern was here #### -Protect Mode: None -Retention: 0 -#### A masked pattern was here #### -Table Type: MANAGED_TABLE -Table Parameters: - COLUMN_STATS_ACCURATE true - numFiles 3 - numRows 15 - rawDataSize 3825 - totalSize 2340 -#### A masked pattern was here #### - -# Storage Information -SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde -InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat -OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat -Compressed: No -Num Buckets: -1 -Bucket Columns: [] -Sort Columns: [] -Storage Desc Params: - serialization.format 1 PREHOOK: query: select * from orc_merge5b PREHOOK: type: QUERY PREHOOK: Input: default@orc_merge5b Index: ql/src/test/results/clientpositive/orc_merge_incompat2.q.out =================================================================== --- ql/src/test/results/clientpositive/orc_merge_incompat2.q.out (revision 1637277) +++ ql/src/test/results/clientpositive/orc_merge_incompat2.q.out (working copy) @@ -218,94 +218,10 @@ POSTHOOK: Input: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a@st=0.8 -PREHOOK: query: desc formatted orc_merge5a partition(st=80.0) -PREHOOK: type: DESCTABLE -PREHOOK: Input: default@orc_merge5a -POSTHOOK: query: desc formatted orc_merge5a partition(st=80.0) -POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@orc_merge5a -# col_name data_type comment 
- -userid bigint -string1 string -subtype double -decimal1 decimal(10,0) -ts timestamp - -# Partition Information -# col_name data_type comment - -st double - -# Detailed Partition Information -Partition Value: [80.0] -Database: default -Table: orc_merge5a +Found 4 items #### A masked pattern was here #### -Protect Mode: None +Found 4 items #### A masked pattern was here #### -Partition Parameters: - COLUMN_STATS_ACCURATE true - numFiles 4 - numRows 4 - rawDataSize 1020 - totalSize 2060 -#### A masked pattern was here #### - -# Storage Information -SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde -InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat -OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat -Compressed: No -Num Buckets: -1 -Bucket Columns: [] -Sort Columns: [] -Storage Desc Params: - serialization.format 1 -PREHOOK: query: desc formatted orc_merge5a partition(st=0.8) -PREHOOK: type: DESCTABLE -PREHOOK: Input: default@orc_merge5a -POSTHOOK: query: desc formatted orc_merge5a partition(st=0.8) -POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@orc_merge5a -# col_name data_type comment - -userid bigint -string1 string -subtype double -decimal1 decimal(10,0) -ts timestamp - -# Partition Information -# col_name data_type comment - -st double - -# Detailed Partition Information -Partition Value: [0.8] -Database: default -Table: orc_merge5a -#### A masked pattern was here #### -Protect Mode: None -#### A masked pattern was here #### -Partition Parameters: - COLUMN_STATS_ACCURATE true - numFiles 4 - numRows 8 - rawDataSize 2040 - totalSize 2188 -#### A masked pattern was here #### - -# Storage Information -SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde -InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat -OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat -Compressed: No -Num Buckets: -1 -Bucket Columns: [] -Sort Columns: [] -Storage Desc Params: - serialization.format 1 PREHOOK: query: show partitions orc_merge5a PREHOOK: type: SHOWPARTITIONS PREHOOK: Input: default@orc_merge5a @@ -407,94 +323,10 @@ POSTHOOK: Input: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a@st=0.8 -PREHOOK: query: desc formatted orc_merge5a partition(st=80.0) -PREHOOK: type: DESCTABLE -PREHOOK: Input: default@orc_merge5a -POSTHOOK: query: desc formatted orc_merge5a partition(st=80.0) -POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@orc_merge5a -# col_name data_type comment - -userid bigint -string1 string -subtype double -decimal1 decimal(10,0) -ts timestamp - -# Partition Information -# col_name data_type comment - -st double - -# Detailed Partition Information -Partition Value: [80.0] -Database: default -Table: orc_merge5a +Found 3 items #### A masked pattern was here #### -Protect Mode: None +Found 3 items #### A masked pattern was here #### -Partition Parameters: - COLUMN_STATS_ACCURATE true - numFiles 3 - numRows 4 - rawDataSize 1020 - totalSize 1819 -#### A masked pattern was here #### - -# Storage Information -SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde -InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat -OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat -Compressed: No -Num Buckets: -1 -Bucket Columns: [] -Sort Columns: [] -Storage Desc Params: - serialization.format 1 -PREHOOK: query: desc formatted orc_merge5a partition(st=0.8) -PREHOOK: type: DESCTABLE -PREHOOK: Input: default@orc_merge5a -POSTHOOK: query: desc formatted orc_merge5a partition(st=0.8) 
-POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@orc_merge5a -# col_name data_type comment - -userid bigint -string1 string -subtype double -decimal1 decimal(10,0) -ts timestamp - -# Partition Information -# col_name data_type comment - -st double - -# Detailed Partition Information -Partition Value: [0.8] -Database: default -Table: orc_merge5a -#### A masked pattern was here #### -Protect Mode: None -#### A masked pattern was here #### -Partition Parameters: - COLUMN_STATS_ACCURATE true - numFiles 3 - numRows 8 - rawDataSize 2040 - totalSize 1928 -#### A masked pattern was here #### - -# Storage Information -SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde -InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat -OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat -Compressed: No -Num Buckets: -1 -Bucket Columns: [] -Sort Columns: [] -Storage Desc Params: - serialization.format 1 PREHOOK: query: show partitions orc_merge5a PREHOOK: type: SHOWPARTITIONS PREHOOK: Input: default@orc_merge5a Index: ql/src/test/results/clientpositive/order_within_subquery.q.out =================================================================== --- ql/src/test/results/clientpositive/order_within_subquery.q.out (revision 1637277) +++ ql/src/test/results/clientpositive/order_within_subquery.q.out (working copy) @@ -1,39 +1,3 @@ -PREHOOK: query: CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@part -POSTHOOK: query: CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@part -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part -PREHOOK: type: LOAD -#### A masked pattern was here #### -PREHOOK: Output: default@part -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part -POSTHOOK: type: LOAD -#### A masked pattern was here #### -POSTHOOK: Output: default@part PREHOOK: query: select t1.p_name, t2.p_name from (select * from part order by p_size limit 10) t1 join part t2 on t1.p_partkey = t2.p_partkey and t1.p_size = t2.p_size where t1.p_partkey < 100000 Index: ql/src/test/results/clientpositive/ptf.q.out =================================================================== --- ql/src/test/results/clientpositive/ptf.q.out (revision 1637277) +++ ql/src/test/results/clientpositive/ptf.q.out (working copy) @@ -1,45 +1,3 @@ -PREHOOK: query: DROP TABLE part -PREHOOK: type: DROPTABLE -POSTHOOK: query: DROP TABLE part -POSTHOOK: type: DROPTABLE -PREHOOK: query: -- data setup -CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@part -POSTHOOK: query: -- data setup -CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@part 
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part -PREHOOK: type: LOAD -#### A masked pattern was here #### -PREHOOK: Output: default@part -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part -POSTHOOK: type: LOAD -#### A masked pattern was here #### -POSTHOOK: Output: default@part PREHOOK: query: -- SORT_QUERY_RESULTS --1. test1 Index: ql/src/test/results/clientpositive/ptf_decimal.q.out =================================================================== --- ql/src/test/results/clientpositive/ptf_decimal.q.out (revision 1637277) +++ ql/src/test/results/clientpositive/ptf_decimal.q.out (working copy) @@ -1,45 +1,3 @@ -PREHOOK: query: DROP TABLE IF EXISTS part -PREHOOK: type: DROPTABLE -POSTHOOK: query: DROP TABLE IF EXISTS part -POSTHOOK: type: DROPTABLE -PREHOOK: query: -- data setup -CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DECIMAL(6,2), - p_comment STRING -) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@part -POSTHOOK: query: -- data setup -CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DECIMAL(6,2), - p_comment STRING -) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@part -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part -PREHOOK: type: LOAD -#### A masked pattern was here #### -PREHOOK: Output: default@part -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part -POSTHOOK: type: LOAD -#### A masked pattern was here #### -POSTHOOK: Output: default@part PREHOOK: query: -- 1. aggregate functions with decimal type select p_mfgr, p_retailprice, Index: ql/src/test/results/clientpositive/ptf_general_queries.q.out =================================================================== --- ql/src/test/results/clientpositive/ptf_general_queries.q.out (revision 1637277) +++ ql/src/test/results/clientpositive/ptf_general_queries.q.out (working copy) @@ -1,45 +1,3 @@ -PREHOOK: query: DROP TABLE part -PREHOOK: type: DROPTABLE -POSTHOOK: query: DROP TABLE part -POSTHOOK: type: DROPTABLE -PREHOOK: query: -- data setup -CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@part -POSTHOOK: query: -- data setup -CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@part -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part -PREHOOK: type: LOAD -#### A masked pattern was here #### -PREHOOK: Output: default@part -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part -POSTHOOK: type: LOAD -#### A masked pattern was here #### -POSTHOOK: Output: default@part PREHOOK: query: -- 1. 
testNoPTFNoWindowing select p_mfgr, p_name, p_size from part Index: ql/src/test/results/clientpositive/ptf_streaming.q.out =================================================================== --- ql/src/test/results/clientpositive/ptf_streaming.q.out (revision 1637277) +++ ql/src/test/results/clientpositive/ptf_streaming.q.out (working copy) @@ -1,45 +1,3 @@ -PREHOOK: query: DROP TABLE part -PREHOOK: type: DROPTABLE -POSTHOOK: query: DROP TABLE part -POSTHOOK: type: DROPTABLE -PREHOOK: query: -- data setup -CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@part -POSTHOOK: query: -- data setup -CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@part -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part -PREHOOK: type: LOAD -#### A masked pattern was here #### -PREHOOK: Output: default@part -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part -POSTHOOK: type: LOAD -#### A masked pattern was here #### -POSTHOOK: Output: default@part PREHOOK: query: create temporary function noopstreaming as 'org.apache.hadoop.hive.ql.udf.ptf.NoopStreaming$NoopStreamingResolver' PREHOOK: type: CREATEFUNCTION PREHOOK: Output: noopstreaming Index: ql/src/test/results/clientpositive/ql_rewrite_gbtoidx.q.out =================================================================== --- ql/src/test/results/clientpositive/ql_rewrite_gbtoidx.q.out (revision 1637277) +++ ql/src/test/results/clientpositive/ql_rewrite_gbtoidx.q.out (working copy) @@ -1,8 +1,8 @@ -PREHOOK: query: DROP TABLE lineitem +PREHOOK: query: DROP TABLE IF EXISTS lineitem_ix PREHOOK: type: DROPTABLE -POSTHOOK: query: DROP TABLE lineitem +POSTHOOK: query: DROP TABLE IF EXISTS lineitem_ix POSTHOOK: type: DROPTABLE -PREHOOK: query: CREATE TABLE lineitem (L_ORDERKEY INT, +PREHOOK: query: CREATE TABLE lineitem_ix (L_ORDERKEY INT, L_PARTKEY INT, L_SUPPKEY INT, L_LINENUMBER INT, @@ -22,8 +22,8 @@ FIELDS TERMINATED BY '|' PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@lineitem -POSTHOOK: query: CREATE TABLE lineitem (L_ORDERKEY INT, +PREHOOK: Output: default@lineitem_ix +POSTHOOK: query: CREATE TABLE lineitem_ix (L_ORDERKEY INT, L_PARTKEY INT, L_SUPPKEY INT, L_LINENUMBER INT, @@ -43,40 +43,40 @@ FIELDS TERMINATED BY '|' POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@lineitem -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/lineitem.txt' OVERWRITE INTO TABLE lineitem +POSTHOOK: Output: default@lineitem_ix +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/lineitem.txt' OVERWRITE INTO TABLE lineitem_ix PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@lineitem -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/lineitem.txt' OVERWRITE INTO TABLE lineitem +PREHOOK: Output: default@lineitem_ix +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/lineitem.txt' OVERWRITE INTO TABLE lineitem_ix POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@lineitem 
-PREHOOK: query: CREATE INDEX lineitem_lshipdate_idx ON TABLE lineitem(l_shipdate) AS 'org.apache.hadoop.hive.ql.index.AggregateIndexHandler' WITH DEFERRED REBUILD IDXPROPERTIES("AGGREGATES"="count(l_shipdate)") +POSTHOOK: Output: default@lineitem_ix +PREHOOK: query: CREATE INDEX lineitem_ix_lshipdate_idx ON TABLE lineitem_ix(l_shipdate) AS 'org.apache.hadoop.hive.ql.index.AggregateIndexHandler' WITH DEFERRED REBUILD IDXPROPERTIES("AGGREGATES"="count(l_shipdate)") PREHOOK: type: CREATEINDEX -PREHOOK: Input: default@lineitem -POSTHOOK: query: CREATE INDEX lineitem_lshipdate_idx ON TABLE lineitem(l_shipdate) AS 'org.apache.hadoop.hive.ql.index.AggregateIndexHandler' WITH DEFERRED REBUILD IDXPROPERTIES("AGGREGATES"="count(l_shipdate)") +PREHOOK: Input: default@lineitem_ix +POSTHOOK: query: CREATE INDEX lineitem_ix_lshipdate_idx ON TABLE lineitem_ix(l_shipdate) AS 'org.apache.hadoop.hive.ql.index.AggregateIndexHandler' WITH DEFERRED REBUILD IDXPROPERTIES("AGGREGATES"="count(l_shipdate)") POSTHOOK: type: CREATEINDEX -POSTHOOK: Input: default@lineitem -POSTHOOK: Output: default@default__lineitem_lineitem_lshipdate_idx__ -PREHOOK: query: ALTER INDEX lineitem_lshipdate_idx ON lineitem REBUILD +POSTHOOK: Input: default@lineitem_ix +POSTHOOK: Output: default@default__lineitem_ix_lineitem_ix_lshipdate_idx__ +PREHOOK: query: ALTER INDEX lineitem_ix_lshipdate_idx ON lineitem_ix REBUILD PREHOOK: type: ALTERINDEX_REBUILD -PREHOOK: Input: default@lineitem -PREHOOK: Output: default@default__lineitem_lineitem_lshipdate_idx__ -POSTHOOK: query: ALTER INDEX lineitem_lshipdate_idx ON lineitem REBUILD +PREHOOK: Input: default@lineitem_ix +PREHOOK: Output: default@default__lineitem_ix_lineitem_ix_lshipdate_idx__ +POSTHOOK: query: ALTER INDEX lineitem_ix_lshipdate_idx ON lineitem_ix REBUILD POSTHOOK: type: ALTERINDEX_REBUILD -POSTHOOK: Input: default@lineitem -POSTHOOK: Output: default@default__lineitem_lineitem_lshipdate_idx__ -POSTHOOK: Lineage: default__lineitem_lineitem_lshipdate_idx__._bucketname SIMPLE [(lineitem)lineitem.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: default__lineitem_lineitem_lshipdate_idx__._count_of_l_shipdate EXPRESSION [(lineitem)lineitem.FieldSchema(name:l_shipdate, type:string, comment:null), ] -POSTHOOK: Lineage: default__lineitem_lineitem_lshipdate_idx__._offsets EXPRESSION [(lineitem)lineitem.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] -POSTHOOK: Lineage: default__lineitem_lineitem_lshipdate_idx__.l_shipdate SIMPLE [(lineitem)lineitem.FieldSchema(name:l_shipdate, type:string, comment:null), ] +POSTHOOK: Input: default@lineitem_ix +POSTHOOK: Output: default@default__lineitem_ix_lineitem_ix_lshipdate_idx__ +POSTHOOK: Lineage: default__lineitem_ix_lineitem_ix_lshipdate_idx__._bucketname SIMPLE [(lineitem_ix)lineitem_ix.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: default__lineitem_ix_lineitem_ix_lshipdate_idx__._count_of_l_shipdate EXPRESSION [(lineitem_ix)lineitem_ix.FieldSchema(name:l_shipdate, type:string, comment:null), ] +POSTHOOK: Lineage: default__lineitem_ix_lineitem_ix_lshipdate_idx__._offsets EXPRESSION [(lineitem_ix)lineitem_ix.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] +POSTHOOK: Lineage: default__lineitem_ix_lineitem_ix_lshipdate_idx__.l_shipdate SIMPLE [(lineitem_ix)lineitem_ix.FieldSchema(name:l_shipdate, type:string, comment:null), ] PREHOOK: query: explain select l_shipdate, count(l_shipdate) -from lineitem +from lineitem_ix group by 
l_shipdate PREHOOK: type: QUERY POSTHOOK: query: explain select l_shipdate, count(l_shipdate) -from lineitem +from lineitem_ix group by l_shipdate POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -88,7 +88,7 @@ Map Reduce Map Operator Tree: TableScan - alias: lineitem + alias: lineitem_ix Statistics: Num rows: 120 Data size: 12099 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: l_shipdate (type: string) @@ -132,18 +132,18 @@ ListSink PREHOOK: query: select l_shipdate, count(l_shipdate) -from lineitem +from lineitem_ix group by l_shipdate order by l_shipdate PREHOOK: type: QUERY -PREHOOK: Input: default@lineitem +PREHOOK: Input: default@lineitem_ix #### A masked pattern was here #### POSTHOOK: query: select l_shipdate, count(l_shipdate) -from lineitem +from lineitem_ix group by l_shipdate order by l_shipdate POSTHOOK: type: QUERY -POSTHOOK: Input: default@lineitem +POSTHOOK: Input: default@lineitem_ix #### A masked pattern was here #### 1992-04-27 1 1992-07-02 1 @@ -241,11 +241,11 @@ 1998-10-23 1 1998-10-30 1 PREHOOK: query: explain select l_shipdate, count(l_shipdate) -from lineitem +from lineitem_ix group by l_shipdate PREHOOK: type: QUERY POSTHOOK: query: explain select l_shipdate, count(l_shipdate) -from lineitem +from lineitem_ix group by l_shipdate POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -257,23 +257,23 @@ Map Reduce Map Operator Tree: TableScan - alias: default.default__lineitem_lineitem_lshipdate_idx__ - Statistics: Num rows: 95 Data size: 8675 Basic stats: COMPLETE Column stats: NONE + alias: default.default__lineitem_ix_lineitem_ix_lshipdate_idx__ + Statistics: Num rows: 95 Data size: 8960 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: l_shipdate (type: string), _count_of_l_shipdate (type: bigint) outputColumnNames: l_shipdate, _count_of_l_shipdate - Statistics: Num rows: 95 Data size: 8675 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 95 Data size: 8960 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: sum(_count_of_l_shipdate) keys: l_shipdate (type: string) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 95 Data size: 8675 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 95 Data size: 8960 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 95 Data size: 8675 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 95 Data size: 8960 Basic stats: COMPLETE Column stats: NONE value expressions: _col1 (type: bigint) Reduce Operator Tree: Group By Operator @@ -281,14 +281,14 @@ keys: KEY._col0 (type: string) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 47 Data size: 4291 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 47 Data size: 4432 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: string), _col1 (type: bigint) outputColumnNames: _col0, _col1 - Statistics: Num rows: 47 Data size: 4291 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 47 Data size: 4432 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 47 Data size: 4291 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 47 Data size: 4432 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -301,20 +301,20 @@ ListSink PREHOOK: query: select l_shipdate, count(l_shipdate) -from lineitem +from lineitem_ix group by l_shipdate order by l_shipdate PREHOOK: type: QUERY -PREHOOK: Input: default@default__lineitem_lineitem_lshipdate_idx__ -PREHOOK: Input: default@lineitem +PREHOOK: Input: default@default__lineitem_ix_lineitem_ix_lshipdate_idx__ +PREHOOK: Input: default@lineitem_ix #### A masked pattern was here #### POSTHOOK: query: select l_shipdate, count(l_shipdate) -from lineitem +from lineitem_ix group by l_shipdate order by l_shipdate POSTHOOK: type: QUERY -POSTHOOK: Input: default@default__lineitem_lineitem_lshipdate_idx__ -POSTHOOK: Input: default@lineitem +POSTHOOK: Input: default@default__lineitem_ix_lineitem_ix_lshipdate_idx__ +POSTHOOK: Input: default@lineitem_ix #### A masked pattern was here #### 1992-04-27 1 1992-07-02 1 @@ -414,14 +414,14 @@ PREHOOK: query: explain select year(l_shipdate) as year, month(l_shipdate) as month, count(l_shipdate) as monthly_shipments -from lineitem +from lineitem_ix group by year(l_shipdate), month(l_shipdate) order by year, month PREHOOK: type: QUERY POSTHOOK: query: explain select year(l_shipdate) as year, month(l_shipdate) as month, count(l_shipdate) as monthly_shipments -from lineitem +from lineitem_ix group by year(l_shipdate), month(l_shipdate) order by year, month POSTHOOK: type: QUERY @@ -435,7 +435,7 @@ Map Reduce Map Operator Tree: TableScan - alias: lineitem + alias: lineitem_ix Statistics: Num rows: 120 Data size: 12099 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: l_shipdate (type: string) @@ -502,20 +502,20 @@ PREHOOK: query: select year(l_shipdate) as year, month(l_shipdate) as month, count(l_shipdate) as monthly_shipments -from lineitem +from lineitem_ix group by year(l_shipdate), month(l_shipdate) order by year, month PREHOOK: type: QUERY -PREHOOK: Input: default@lineitem +PREHOOK: Input: default@lineitem_ix #### A masked pattern was here #### POSTHOOK: query: select year(l_shipdate) as year, month(l_shipdate) as month, count(l_shipdate) as monthly_shipments -from lineitem +from lineitem_ix group by year(l_shipdate), month(l_shipdate) order by year, month POSTHOOK: type: QUERY -POSTHOOK: Input: default@lineitem +POSTHOOK: Input: default@lineitem_ix #### A masked pattern was here #### 1992 4 1 1992 7 3 @@ -563,14 +563,14 @@ PREHOOK: query: explain select year(l_shipdate) as year, month(l_shipdate) as month, count(l_shipdate) as monthly_shipments -from lineitem +from lineitem_ix group by year(l_shipdate), month(l_shipdate) order by year, month PREHOOK: type: QUERY POSTHOOK: query: explain select year(l_shipdate) as year, month(l_shipdate) as month, count(l_shipdate) as monthly_shipments -from lineitem +from lineitem_ix group by year(l_shipdate), month(l_shipdate) order by year, month POSTHOOK: type: QUERY @@ -584,23 +584,23 @@ Map Reduce Map Operator Tree: TableScan - alias: default.default__lineitem_lineitem_lshipdate_idx__ - Statistics: Num rows: 95 Data size: 8675 Basic stats: COMPLETE Column stats: NONE + alias: default.default__lineitem_ix_lineitem_ix_lshipdate_idx__ + Statistics: Num rows: 95 Data size: 8960 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: l_shipdate (type: string), _count_of_l_shipdate (type: bigint) outputColumnNames: l_shipdate, _count_of_l_shipdate - Statistics: Num rows: 95 Data size: 8675 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 95 Data size: 8960 Basic stats: COMPLETE 
Column stats: NONE Group By Operator aggregations: sum(_count_of_l_shipdate) keys: year(l_shipdate) (type: int), month(l_shipdate) (type: int) mode: hash outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 95 Data size: 8675 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 95 Data size: 8960 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: int), _col1 (type: int) sort order: ++ Map-reduce partition columns: _col0 (type: int), _col1 (type: int) - Statistics: Num rows: 95 Data size: 8675 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 95 Data size: 8960 Basic stats: COMPLETE Column stats: NONE value expressions: _col2 (type: bigint) Reduce Operator Tree: Group By Operator @@ -608,11 +608,11 @@ keys: KEY._col0 (type: int), KEY._col1 (type: int) mode: mergepartial outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 47 Data size: 4291 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 47 Data size: 4432 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: int), _col1 (type: int), _col2 (type: bigint) outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 47 Data size: 4291 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 47 Data size: 4432 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false table: @@ -627,16 +627,16 @@ Reduce Output Operator key expressions: _col0 (type: int), _col1 (type: int) sort order: ++ - Statistics: Num rows: 47 Data size: 4291 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 47 Data size: 4432 Basic stats: COMPLETE Column stats: NONE value expressions: _col2 (type: bigint) Reduce Operator Tree: Select Operator expressions: KEY.reducesinkkey0 (type: int), KEY.reducesinkkey1 (type: int), VALUE._col0 (type: bigint) outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 47 Data size: 4291 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 47 Data size: 4432 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 47 Data size: 4291 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 47 Data size: 4432 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -651,22 +651,22 @@ PREHOOK: query: select year(l_shipdate) as year, month(l_shipdate) as month, count(l_shipdate) as monthly_shipments -from lineitem +from lineitem_ix group by year(l_shipdate), month(l_shipdate) order by year, month PREHOOK: type: QUERY -PREHOOK: Input: default@default__lineitem_lineitem_lshipdate_idx__ -PREHOOK: Input: default@lineitem +PREHOOK: Input: default@default__lineitem_ix_lineitem_ix_lshipdate_idx__ +PREHOOK: Input: default@lineitem_ix #### A masked pattern was here #### POSTHOOK: query: select year(l_shipdate) as year, month(l_shipdate) as month, count(l_shipdate) as monthly_shipments -from lineitem +from lineitem_ix group by year(l_shipdate), month(l_shipdate) order by year, month POSTHOOK: type: QUERY -POSTHOOK: Input: default@default__lineitem_lineitem_lshipdate_idx__ -POSTHOOK: Input: default@lineitem +POSTHOOK: Input: default@default__lineitem_ix_lineitem_ix_lshipdate_idx__ +POSTHOOK: Input: default@lineitem_ix #### A masked pattern was here #### 1992 4 1 1992 7 3 @@ -718,14 +718,14 @@ from (select year(l_shipdate) as year, month(l_shipdate) as month, count(l_shipdate) as 
monthly_shipments - from lineitem + from lineitem_ix where year(l_shipdate) = 1997 group by year(l_shipdate), month(l_shipdate) ) lastyear join (select year(l_shipdate) as year, month(l_shipdate) as month, count(l_shipdate) as monthly_shipments - from lineitem + from lineitem_ix where year(l_shipdate) = 1998 group by year(l_shipdate), month(l_shipdate) ) thisyear @@ -738,14 +738,14 @@ from (select year(l_shipdate) as year, month(l_shipdate) as month, count(l_shipdate) as monthly_shipments - from lineitem + from lineitem_ix where year(l_shipdate) = 1997 group by year(l_shipdate), month(l_shipdate) ) lastyear join (select year(l_shipdate) as year, month(l_shipdate) as month, count(l_shipdate) as monthly_shipments - from lineitem + from lineitem_ix where year(l_shipdate) = 1998 group by year(l_shipdate), month(l_shipdate) ) thisyear @@ -762,29 +762,29 @@ Map Reduce Map Operator Tree: TableScan - alias: thisyear:default.default__lineitem_lineitem_lshipdate_idx__ - Statistics: Num rows: 95 Data size: 8675 Basic stats: COMPLETE Column stats: NONE + alias: lastyear:default.default__lineitem_ix_lineitem_ix_lshipdate_idx__ + Statistics: Num rows: 95 Data size: 8960 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (year(l_shipdate) = 1998) (type: boolean) - Statistics: Num rows: 47 Data size: 4291 Basic stats: COMPLETE Column stats: NONE + predicate: (year(l_shipdate) = 1997) (type: boolean) + Statistics: Num rows: 47 Data size: 4432 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: l_shipdate (type: string), _count_of_l_shipdate (type: bigint) outputColumnNames: l_shipdate, _count_of_l_shipdate - Statistics: Num rows: 47 Data size: 4291 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 47 Data size: 4432 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: sum(_count_of_l_shipdate) keys: year(l_shipdate) (type: int), month(l_shipdate) (type: int) mode: hash outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 23 Data size: 2099 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 23 Data size: 2168 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: _col1 is not null (type: boolean) - Statistics: Num rows: 12 Data size: 1095 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 12 Data size: 1131 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: int), _col1 (type: int) sort order: ++ Map-reduce partition columns: _col0 (type: int), _col1 (type: int) - Statistics: Num rows: 12 Data size: 1095 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 12 Data size: 1131 Basic stats: COMPLETE Column stats: NONE value expressions: _col2 (type: bigint) Reduce Operator Tree: Group By Operator @@ -792,11 +792,11 @@ keys: KEY._col0 (type: int), KEY._col1 (type: int) mode: mergepartial outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 6 Data size: 547 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 6 Data size: 565 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col1 (type: int), _col2 (type: bigint) outputColumnNames: _col1, _col2 - Statistics: Num rows: 6 Data size: 547 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 6 Data size: 565 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false table: @@ -812,14 +812,14 @@ key expressions: _col1 (type: int) sort order: + Map-reduce partition columns: _col1 (type: int) - Statistics: Num rows: 6 
Data size: 547 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 6 Data size: 565 Basic stats: COMPLETE Column stats: NONE value expressions: _col2 (type: bigint) TableScan Reduce Output Operator key expressions: _col1 (type: int) sort order: + Map-reduce partition columns: _col1 (type: int) - Statistics: Num rows: 6 Data size: 547 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 6 Data size: 565 Basic stats: COMPLETE Column stats: NONE value expressions: _col2 (type: bigint) Reduce Operator Tree: Join Operator @@ -829,14 +829,14 @@ 0 {KEY.reducesinkkey0} {VALUE._col1} 1 {KEY.reducesinkkey0} {VALUE._col1} outputColumnNames: _col1, _col2, _col4, _col5 - Statistics: Num rows: 6 Data size: 601 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 6 Data size: 621 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col1 (type: int), _col4 (type: int), ((_col5 - _col2) / _col2) (type: double) outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 6 Data size: 601 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 6 Data size: 621 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 6 Data size: 601 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 6 Data size: 621 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -846,29 +846,29 @@ Map Reduce Map Operator Tree: TableScan - alias: lastyear:default.default__lineitem_lineitem_lshipdate_idx__ - Statistics: Num rows: 95 Data size: 8675 Basic stats: COMPLETE Column stats: NONE + alias: thisyear:default.default__lineitem_ix_lineitem_ix_lshipdate_idx__ + Statistics: Num rows: 95 Data size: 8960 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (year(l_shipdate) = 1997) (type: boolean) - Statistics: Num rows: 47 Data size: 4291 Basic stats: COMPLETE Column stats: NONE + predicate: (year(l_shipdate) = 1998) (type: boolean) + Statistics: Num rows: 47 Data size: 4432 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: l_shipdate (type: string), _count_of_l_shipdate (type: bigint) outputColumnNames: l_shipdate, _count_of_l_shipdate - Statistics: Num rows: 47 Data size: 4291 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 47 Data size: 4432 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: sum(_count_of_l_shipdate) keys: year(l_shipdate) (type: int), month(l_shipdate) (type: int) mode: hash outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 23 Data size: 2099 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 23 Data size: 2168 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: _col1 is not null (type: boolean) - Statistics: Num rows: 12 Data size: 1095 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 12 Data size: 1131 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: int), _col1 (type: int) sort order: ++ Map-reduce partition columns: _col0 (type: int), _col1 (type: int) - Statistics: Num rows: 12 Data size: 1095 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 12 Data size: 1131 Basic stats: COMPLETE Column stats: NONE value expressions: _col2 (type: bigint) Reduce Operator Tree: Group By Operator @@ -876,11 +876,11 @@ keys: KEY._col0 (type: int), KEY._col1 (type: int) mode: mergepartial 
outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 6 Data size: 547 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 6 Data size: 565 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col1 (type: int), _col2 (type: bigint) outputColumnNames: _col1, _col2 - Statistics: Num rows: 6 Data size: 547 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 6 Data size: 565 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false table: @@ -895,44 +895,44 @@ ListSink PREHOOK: query: explain select l_shipdate, cnt -from (select l_shipdate, count(l_shipdate) as cnt from lineitem group by l_shipdate +from (select l_shipdate, count(l_shipdate) as cnt from lineitem_ix group by l_shipdate union all select l_shipdate, l_orderkey as cnt -from lineitem) dummy +from lineitem_ix) dummy PREHOOK: type: QUERY POSTHOOK: query: explain select l_shipdate, cnt -from (select l_shipdate, count(l_shipdate) as cnt from lineitem group by l_shipdate +from (select l_shipdate, count(l_shipdate) as cnt from lineitem_ix group by l_shipdate union all select l_shipdate, l_orderkey as cnt -from lineitem) dummy +from lineitem_ix) dummy POSTHOOK: type: QUERY STAGE DEPENDENCIES: - Stage-3 is a root stage - Stage-2 depends on stages: Stage-3 + Stage-1 is a root stage + Stage-2 depends on stages: Stage-1 Stage-0 depends on stages: Stage-2 STAGE PLANS: - Stage: Stage-3 + Stage: Stage-1 Map Reduce Map Operator Tree: TableScan - alias: null-subquery1:dummy-subquery1:default.default__lineitem_lineitem_lshipdate_idx__ - Statistics: Num rows: 95 Data size: 8675 Basic stats: COMPLETE Column stats: NONE + alias: null-subquery1:dummy-subquery1:default.default__lineitem_ix_lineitem_ix_lshipdate_idx__ + Statistics: Num rows: 95 Data size: 8960 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: l_shipdate (type: string), _count_of_l_shipdate (type: bigint) outputColumnNames: l_shipdate, _count_of_l_shipdate - Statistics: Num rows: 95 Data size: 8675 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 95 Data size: 8960 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: sum(_count_of_l_shipdate) keys: l_shipdate (type: string) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 95 Data size: 8675 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 95 Data size: 8960 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 95 Data size: 8675 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 95 Data size: 8960 Basic stats: COMPLETE Column stats: NONE value expressions: _col1 (type: bigint) Reduce Operator Tree: Group By Operator @@ -940,11 +940,11 @@ keys: KEY._col0 (type: string) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 47 Data size: 4291 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 47 Data size: 4432 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: string), _col1 (type: bigint) outputColumnNames: _col0, _col1 - Statistics: Num rows: 47 Data size: 4291 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 47 Data size: 4432 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false table: @@ -956,39 +956,39 @@ Map Reduce Map Operator Tree: TableScan - alias: lineitem + Union + Statistics: Num rows: 163 Data size: 16531 
Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: string), _col1 (type: bigint) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 163 Data size: 16531 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 163 Data size: 16531 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + TableScan + alias: lineitem_ix Statistics: Num rows: 116 Data size: 12099 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: l_shipdate (type: string), UDFToLong(l_orderkey) (type: bigint) outputColumnNames: _col0, _col1 Statistics: Num rows: 116 Data size: 12099 Basic stats: COMPLETE Column stats: NONE Union - Statistics: Num rows: 163 Data size: 16390 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 163 Data size: 16531 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: string), _col1 (type: bigint) outputColumnNames: _col0, _col1 - Statistics: Num rows: 163 Data size: 16390 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 163 Data size: 16531 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 163 Data size: 16390 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 163 Data size: 16531 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - TableScan - Union - Statistics: Num rows: 163 Data size: 16390 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: _col0 (type: string), _col1 (type: bigint) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 163 Data size: 16390 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - Statistics: Num rows: 163 Data size: 16390 Basic stats: COMPLETE Column stats: NONE - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe Stage: Stage-0 Fetch Operator Index: ql/src/test/results/clientpositive/reducesink_dedup.q.out =================================================================== --- ql/src/test/results/clientpositive/reducesink_dedup.q.out (revision 1637277) +++ ql/src/test/results/clientpositive/reducesink_dedup.q.out (working copy) @@ -1,37 +1,3 @@ -PREHOOK: query: DROP TABLE part -PREHOOK: type: DROPTABLE -POSTHOOK: query: DROP TABLE part -POSTHOOK: type: DROPTABLE -PREHOOK: query: -- data setup -CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@part -POSTHOOK: query: -- data setup -CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@part PREHOOK: query: select p_name from (select p_name from part distribute by 1 sort 
by 1) p distribute by 1 sort by 1 @@ -44,3 +10,29 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@part #### A masked pattern was here #### +almond azure blanched chiffon midnight +almond aquamarine dodger light gainsboro +almond antique sky peru orange +almond antique medium spring khaki +almond antique blue firebrick mint +almond azure aquamarine papaya violet +almond aquamarine yellow dodger mint +almond aquamarine floral ivory bisque +almond antique violet mint lemon +almond antique gainsboro frosted violet +almond antique olive coral navajo +almond antique misty red olive +almond antique metallic orange dim +almond antique forest lavender goldenrod +almond antique chartreuse khaki white +almond aquamarine sandy cyan gainsboro +almond aquamarine rose maroon antique +almond aquamarine midnight light salmon +almond antique violet turquoise frosted +almond antique violet chocolate turquoise +almond aquamarine pink moccasin thistle +almond aquamarine burnished black steel +almond antique salmon chartreuse burlywood +almond antique chartreuse lavender yellow +almond antique burnished rose metallic +almond antique burnished rose metallic Index: ql/src/test/results/clientpositive/rename_column.q.out =================================================================== --- ql/src/test/results/clientpositive/rename_column.q.out (revision 1637277) +++ ql/src/test/results/clientpositive/rename_column.q.out (working copy) @@ -147,8 +147,14 @@ POSTHOOK: query: SHOW TABLES POSTHOOK: type: SHOWTABLES alltypesorc +cbo_t1 +cbo_t2 +cbo_t3 +lineitem +part src src1 +src_cbo src_json src_sequencefile src_thrift Index: ql/src/test/results/clientpositive/scriptfile1_win.q.out =================================================================== --- ql/src/test/results/clientpositive/scriptfile1_win.q.out (revision 1637277) +++ ql/src/test/results/clientpositive/scriptfile1_win.q.out (working copy) @@ -3,6 +3,7 @@ CREATE TABLE dest1(key INT, value STRING) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default +PREHOOK: Output: default@dest1 POSTHOOK: query: -- INCLUDE_OS_WINDOWS CREATE TABLE dest1(key INT, value STRING) @@ -39,8 +40,6 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@dest1 #### A masked pattern was here #### -POSTHOOK: Lineage: dest1.key SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.value SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ] NULL NULL NULL NULL 10 val_10 Index: ql/src/test/results/clientpositive/show_tables.q.out =================================================================== --- ql/src/test/results/clientpositive/show_tables.q.out (revision 1637277) +++ ql/src/test/results/clientpositive/show_tables.q.out (working copy) @@ -130,10 +130,16 @@ POSTHOOK: query: SHOW TABLES FROM default POSTHOOK: type: SHOWTABLES alltypesorc +cbo_t1 +cbo_t2 +cbo_t3 +lineitem +part shtb_test1 shtb_test2 src src1 +src_cbo src_json src_sequencefile src_thrift @@ -152,10 +158,16 @@ POSTHOOK: query: SHOW TABLES IN default POSTHOOK: type: SHOWTABLES alltypesorc +cbo_t1 +cbo_t2 +cbo_t3 +lineitem +part shtb_test1 shtb_test2 src src1 +src_cbo src_json src_sequencefile src_thrift Index: ql/src/test/results/clientpositive/skewjoin_mapjoin1.q.out =================================================================== --- ql/src/test/results/clientpositive/skewjoin_mapjoin1.q.out (revision 0) +++ 
ql/src/test/results/clientpositive/skewjoin_mapjoin1.q.out (working copy) @@ -0,0 +1,660 @@ +PREHOOK: query: CREATE TABLE T1(key STRING, val STRING) +SKEWED BY (key) ON ((2)) STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@T1 +POSTHOOK: query: CREATE TABLE T1(key STRING, val STRING) +SKEWED BY (key) ON ((2)) STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@T1 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1 +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@t1 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1 +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@t1 +PREHOOK: query: CREATE TABLE T2(key STRING, val STRING) +SKEWED BY (key) ON ((3)) STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@T2 +POSTHOOK: query: CREATE TABLE T2(key STRING, val STRING) +SKEWED BY (key) ON ((3)) STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@T2 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2 +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@t2 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2 +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@t2 +PREHOOK: query: -- copy from skewjoinopt1 +-- test compile time skew join and auto map join +-- a simple join query with skew on both the tables on the join key +-- adding an order by at the end to make the results deterministic + +EXPLAIN +SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key +PREHOOK: type: QUERY +POSTHOOK: query: -- copy from skewjoinopt1 +-- test compile time skew join and auto map join +-- a simple join query with skew on both the tables on the join key +-- adding an order by at the end to make the results deterministic + +EXPLAIN +SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-8 is a root stage + Stage-2 depends on stages: Stage-8 + Stage-0 depends on stages: Stage-2 + +STAGE PLANS: + Stage: Stage-8 + Map Reduce Local Work + Alias -> Map Local Tables: + a + Fetch Operator + limit: -1 + subquery1:a + Fetch Operator + limit: -1 + Alias -> Map Local Operator Tree: + a + TableScan + alias: a + Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Filter Operator + predicate: (key is not null and ((key = '2') or (key = '3'))) (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + HashTable Sink Operator + condition expressions: + 0 {val} + 1 {key} {val} + keys: + 0 key (type: string) + 1 key (type: string) + subquery1:a + TableScan + alias: a + Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Filter Operator + predicate: (key is not null and (not ((key = '2') or (key = '3')))) (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + HashTable Sink Operator + condition expressions: + 0 {val} + 1 {key} {val} + keys: + 0 key (type: string) + 1 key (type: string) + + Stage: Stage-2 + Map Reduce + Map Operator Tree: + TableScan + alias: b + Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Filter Operator + predicate: (key is not null and 
(not ((key = '2') or (key = '3')))) (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 {key} {val} + 1 {key} {val} + keys: + 0 key (type: string) + 1 key (type: string) + outputColumnNames: _col0, _col1, _col5, _col6 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Select Operator + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string) + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Union + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Select Operator + SELECT * : (no compute) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + TableScan + alias: b + Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Filter Operator + predicate: (key is not null and ((key = '2') or (key = '3'))) (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 {key} {val} + 1 {key} {val} + keys: + 0 key (type: string) + 1 key (type: string) + outputColumnNames: _col0, _col1, _col5, _col6 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Select Operator + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string) + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Union + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Select Operator + SELECT * : (no compute) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Local Work: + Map Reduce Local Work + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key +ORDER BY a.key, b.key, a.val, b.val +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t2 +#### A masked pattern was here #### +POSTHOOK: query: SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key +ORDER BY a.key, b.key, a.val, b.val +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t2 +#### A masked pattern was here #### +2 12 2 22 +3 13 3 13 +8 18 8 18 +8 18 8 18 +8 28 8 18 +8 28 8 18 +PREHOOK: query: -- test outer joins also + +EXPLAIN +SELECT a.*, b.* FROM T1 a RIGHT OUTER JOIN T2 b ON a.key = b.key +PREHOOK: type: QUERY +POSTHOOK: query: -- test outer joins also + +EXPLAIN +SELECT a.*, b.* FROM T1 a RIGHT OUTER JOIN T2 b ON a.key = b.key +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-8 is a root stage + Stage-2 depends on stages: Stage-8 + Stage-0 depends on stages: 
Stage-2 + +STAGE PLANS: + Stage: Stage-8 + Map Reduce Local Work + Alias -> Map Local Tables: + a + Fetch Operator + limit: -1 + subquery1:a + Fetch Operator + limit: -1 + Alias -> Map Local Operator Tree: + a + TableScan + alias: a + Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Filter Operator + predicate: ((key = '2') or (key = '3')) (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + HashTable Sink Operator + condition expressions: + 0 {val} + 1 {key} {val} + keys: + 0 key (type: string) + 1 key (type: string) + subquery1:a + TableScan + alias: a + Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Filter Operator + predicate: (not ((key = '2') or (key = '3'))) (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + HashTable Sink Operator + condition expressions: + 0 {val} + 1 {key} {val} + keys: + 0 key (type: string) + 1 key (type: string) + + Stage: Stage-2 + Map Reduce + Map Operator Tree: + TableScan + alias: b + Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Filter Operator + predicate: (not ((key = '2') or (key = '3'))) (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Map Join Operator + condition map: + Right Outer Join0 to 1 + condition expressions: + 0 {key} {val} + 1 {key} {val} + keys: + 0 key (type: string) + 1 key (type: string) + outputColumnNames: _col0, _col1, _col5, _col6 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Select Operator + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string) + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Union + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Select Operator + SELECT * : (no compute) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + TableScan + alias: b + Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Filter Operator + predicate: ((key = '2') or (key = '3')) (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Map Join Operator + condition map: + Right Outer Join0 to 1 + condition expressions: + 0 {key} {val} + 1 {key} {val} + keys: + 0 key (type: string) + 1 key (type: string) + outputColumnNames: _col0, _col1, _col5, _col6 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Select Operator + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string) + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Union + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Select Operator + SELECT * : (no compute) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + table: + input format: 
org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Local Work: + Map Reduce Local Work + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT a.*, b.* FROM T1 a RIGHT OUTER JOIN T2 b ON a.key = b.key +ORDER BY a.key, b.key, a.val, b.val +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t2 +#### A masked pattern was here #### +POSTHOOK: query: SELECT a.*, b.* FROM T1 a RIGHT OUTER JOIN T2 b ON a.key = b.key +ORDER BY a.key, b.key, a.val, b.val +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t2 +#### A masked pattern was here #### +NULL NULL 4 14 +NULL NULL 5 15 +2 12 2 22 +3 13 3 13 +8 18 8 18 +8 18 8 18 +8 28 8 18 +8 28 8 18 +PREHOOK: query: -- an aggregation at the end should not change anything + +EXPLAIN +SELECT count(1) FROM T1 a JOIN T2 b ON a.key = b.key +PREHOOK: type: QUERY +POSTHOOK: query: -- an aggregation at the end should not change anything + +EXPLAIN +SELECT count(1) FROM T1 a JOIN T2 b ON a.key = b.key +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-8 is a root stage + Stage-2 depends on stages: Stage-8 + Stage-0 depends on stages: Stage-2 + +STAGE PLANS: + Stage: Stage-8 + Map Reduce Local Work + Alias -> Map Local Tables: + a + Fetch Operator + limit: -1 + subquery1:a + Fetch Operator + limit: -1 + Alias -> Map Local Operator Tree: + a + TableScan + alias: a + Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Filter Operator + predicate: (key is not null and ((key = '2') or (key = '3'))) (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + HashTable Sink Operator + condition expressions: + 0 + 1 + keys: + 0 key (type: string) + 1 key (type: string) + subquery1:a + TableScan + alias: a + Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Filter Operator + predicate: (key is not null and (not ((key = '2') or (key = '3')))) (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + HashTable Sink Operator + condition expressions: + 0 + 1 + keys: + 0 key (type: string) + 1 key (type: string) + + Stage: Stage-2 + Map Reduce + Map Operator Tree: + TableScan + alias: b + Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Filter Operator + predicate: (key is not null and (not ((key = '2') or (key = '3')))) (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 + 1 + keys: + 0 key (type: string) + 1 key (type: string) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Select Operator + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Union + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Select Operator + SELECT * : (no compute) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Group By Operator + aggregations: count(1) + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + TableScan + alias: b + Statistics: Num rows: 0 Data size: 30 
Basic stats: PARTIAL Column stats: NONE + Filter Operator + predicate: (key is not null and ((key = '2') or (key = '3'))) (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 + 1 + keys: + 0 key (type: string) + 1 key (type: string) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Select Operator + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Union + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Select Operator + SELECT * : (no compute) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Group By Operator + aggregations: count(1) + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Local Work: + Map Reduce Local Work + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT count(1) FROM T1 a JOIN T2 b ON a.key = b.key +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t2 +#### A masked pattern was here #### +POSTHOOK: query: SELECT count(1) FROM T1 a JOIN T2 b ON a.key = b.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t2 +#### A masked pattern was here #### +6 +PREHOOK: query: EXPLAIN +SELECT count(1) FROM T1 a RIGHT OUTER JOIN T2 b ON a.key = b.key +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN +SELECT count(1) FROM T1 a RIGHT OUTER JOIN T2 b ON a.key = b.key +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-8 is a root stage + Stage-2 depends on stages: Stage-8 + Stage-0 depends on stages: Stage-2 + +STAGE PLANS: + Stage: Stage-8 + Map Reduce Local Work + Alias -> Map Local Tables: + a + Fetch Operator + limit: -1 + subquery1:a + Fetch Operator + limit: -1 + Alias -> Map Local Operator Tree: + a + TableScan + alias: a + Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Filter Operator + predicate: ((key = '2') or (key = '3')) (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + HashTable Sink Operator + condition expressions: + 0 + 1 + keys: + 0 key (type: string) + 1 key (type: string) + subquery1:a + TableScan + alias: a + Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Filter Operator + predicate: (not ((key = '2') or (key = '3'))) (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + HashTable Sink Operator + condition expressions: + 0 + 1 + keys: + 0 key (type: string) + 1 key (type: string) + 
+ Stage: Stage-2 + Map Reduce + Map Operator Tree: + TableScan + alias: b + Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Filter Operator + predicate: (not ((key = '2') or (key = '3'))) (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Map Join Operator + condition map: + Right Outer Join0 to 1 + condition expressions: + 0 + 1 + keys: + 0 key (type: string) + 1 key (type: string) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Select Operator + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Union + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Select Operator + SELECT * : (no compute) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Group By Operator + aggregations: count(1) + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + TableScan + alias: b + Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Filter Operator + predicate: ((key = '2') or (key = '3')) (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Map Join Operator + condition map: + Right Outer Join0 to 1 + condition expressions: + 0 + 1 + keys: + 0 key (type: string) + 1 key (type: string) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Select Operator + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Union + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Select Operator + SELECT * : (no compute) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Group By Operator + aggregations: count(1) + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Local Work: + Map Reduce Local Work + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT count(1) FROM T1 a RIGHT OUTER JOIN T2 b ON a.key = b.key +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t2 +#### A masked pattern was here #### +POSTHOOK: query: SELECT count(1) FROM T1 a RIGHT OUTER JOIN T2 b ON a.key = b.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t2 +#### A masked pattern was here #### +8 Index: ql/src/test/results/clientpositive/skewjoin_mapjoin10.q.out 
=================================================================== --- ql/src/test/results/clientpositive/skewjoin_mapjoin10.q.out (revision 0) +++ ql/src/test/results/clientpositive/skewjoin_mapjoin10.q.out (working copy) @@ -0,0 +1,700 @@ +PREHOOK: query: CREATE TABLE tmpT1(key STRING, val STRING) STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@tmpT1 +POSTHOOK: query: CREATE TABLE tmpT1(key STRING, val STRING) STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@tmpT1 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE tmpT1 +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@tmpt1 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE tmpT1 +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@tmpt1 +PREHOOK: query: -- testing skew on other data types - int +CREATE TABLE T1(key INT, val STRING) SKEWED BY (key) ON ((2)) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@T1 +POSTHOOK: query: -- testing skew on other data types - int +CREATE TABLE T1(key INT, val STRING) SKEWED BY (key) ON ((2)) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@T1 +PREHOOK: query: INSERT OVERWRITE TABLE T1 SELECT key, val FROM tmpT1 +PREHOOK: type: QUERY +PREHOOK: Input: default@tmpt1 +PREHOOK: Output: default@t1 +POSTHOOK: query: INSERT OVERWRITE TABLE T1 SELECT key, val FROM tmpT1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@tmpt1 +POSTHOOK: Output: default@t1 +POSTHOOK: Lineage: t1.key EXPRESSION [(tmpt1)tmpt1.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: t1.val SIMPLE [(tmpt1)tmpt1.FieldSchema(name:val, type:string, comment:null), ] +PREHOOK: query: CREATE TABLE tmpT2(key STRING, val STRING) STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@tmpT2 +POSTHOOK: query: CREATE TABLE tmpT2(key STRING, val STRING) STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@tmpT2 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE tmpT2 +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@tmpt2 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE tmpT2 +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@tmpt2 +PREHOOK: query: CREATE TABLE T2(key INT, val STRING) SKEWED BY (key) ON ((3)) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@T2 +POSTHOOK: query: CREATE TABLE T2(key INT, val STRING) SKEWED BY (key) ON ((3)) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@T2 +PREHOOK: query: INSERT OVERWRITE TABLE T2 SELECT key, val FROM tmpT2 +PREHOOK: type: QUERY +PREHOOK: Input: default@tmpt2 +PREHOOK: Output: default@t2 +POSTHOOK: query: INSERT OVERWRITE TABLE T2 SELECT key, val FROM tmpT2 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@tmpt2 +POSTHOOK: Output: default@t2 +POSTHOOK: Lineage: t2.key EXPRESSION [(tmpt2)tmpt2.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: t2.val SIMPLE [(tmpt2)tmpt2.FieldSchema(name:val, type:string, comment:null), ] +PREHOOK: query: -- copy from skewjoinopt15 +-- test compile time skew join and auto map join +-- The skewed 
key is an integer column. +-- Otherwise this test is similar to skewjoinopt1.q +-- Both the joined tables are skewed, and the joined column +-- is an integer +-- adding an order by at the end to make the results deterministic + +EXPLAIN +SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key +PREHOOK: type: QUERY +POSTHOOK: query: -- copy from skewjoinopt15 +-- test compile time skew join and auto map join +-- The skewed key is an integer column. +-- Otherwise this test is similar to skewjoinopt1.q +-- Both the joined tables are skewed, and the joined column +-- is an integer +-- adding an order by at the end to make the results deterministic + +EXPLAIN +SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-8 is a root stage + Stage-2 depends on stages: Stage-8 + Stage-0 depends on stages: Stage-2 + +STAGE PLANS: + Stage: Stage-8 + Map Reduce Local Work + Alias -> Map Local Tables: + a + Fetch Operator + limit: -1 + subquery1:a + Fetch Operator + limit: -1 + Alias -> Map Local Operator Tree: + a + TableScan + alias: a + Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (key is not null and ((key = 2) or (key = 3))) (type: boolean) + Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: NONE + HashTable Sink Operator + condition expressions: + 0 {val} + 1 {key} {val} + keys: + 0 key (type: int) + 1 key (type: int) + subquery1:a + TableScan + alias: a + Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (key is not null and (not ((key = 2) or (key = 3)))) (type: boolean) + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE + HashTable Sink Operator + condition expressions: + 0 {val} + 1 {key} {val} + keys: + 0 key (type: int) + 1 key (type: int) + + Stage: Stage-2 + Map Reduce + Map Operator Tree: + TableScan + alias: b + Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (key is not null and (not ((key = 2) or (key = 3)))) (type: boolean) + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 {key} {val} + 1 {key} {val} + keys: + 0 key (type: int) + 1 key (type: int) + outputColumnNames: _col0, _col1, _col5, _col6 + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string) + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE + Union + Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE + Select Operator + SELECT * : (no compute) + Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + TableScan + alias: b + Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (key is not null and ((key = 2) or (key = 3))) (type: boolean) + Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE 
Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 {key} {val} + 1 {key} {val} + keys: + 0 key (type: int) + 1 key (type: int) + outputColumnNames: _col0, _col1, _col5, _col6 + Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string) + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Union + Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE + Select Operator + SELECT * : (no compute) + Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Local Work: + Map Reduce Local Work + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key +ORDER BY a.key, b.key, a.val, b.val +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t2 +#### A masked pattern was here #### +POSTHOOK: query: SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key +ORDER BY a.key, b.key, a.val, b.val +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t2 +#### A masked pattern was here #### +2 12 2 22 +3 13 3 13 +8 18 8 18 +8 18 8 18 +8 28 8 18 +8 28 8 18 +PREHOOK: query: -- test outer joins also + +EXPLAIN +SELECT a.*, b.* FROM T1 a RIGHT OUTER JOIN T2 b ON a.key = b.key +PREHOOK: type: QUERY +POSTHOOK: query: -- test outer joins also + +EXPLAIN +SELECT a.*, b.* FROM T1 a RIGHT OUTER JOIN T2 b ON a.key = b.key +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-8 is a root stage + Stage-2 depends on stages: Stage-8 + Stage-0 depends on stages: Stage-2 + +STAGE PLANS: + Stage: Stage-8 + Map Reduce Local Work + Alias -> Map Local Tables: + a + Fetch Operator + limit: -1 + subquery1:a + Fetch Operator + limit: -1 + Alias -> Map Local Operator Tree: + a + TableScan + alias: a + Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: ((key = 2) or (key = 3)) (type: boolean) + Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE + HashTable Sink Operator + condition expressions: + 0 {val} + 1 {key} {val} + keys: + 0 key (type: int) + 1 key (type: int) + subquery1:a + TableScan + alias: a + Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (not ((key = 2) or (key = 3))) (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + HashTable Sink Operator + condition expressions: + 0 {val} + 1 {key} {val} + keys: + 0 key (type: int) + 1 key (type: int) + + Stage: Stage-2 + Map Reduce + Map Operator Tree: + TableScan + alias: b + Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (not ((key = 2) or (key = 3))) (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Map Join Operator + condition map: + Right Outer Join0 to 1 + condition expressions: + 0 {key} {val} + 1 {key} {val} + keys: + 0 key 
(type: int) + 1 key (type: int) + outputColumnNames: _col0, _col1, _col5, _col6 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Select Operator + expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string) + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Union + Statistics: Num rows: 6 Data size: 26 Basic stats: COMPLETE Column stats: NONE + Select Operator + SELECT * : (no compute) + Statistics: Num rows: 6 Data size: 26 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 6 Data size: 26 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + TableScan + alias: b + Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: ((key = 2) or (key = 3)) (type: boolean) + Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Right Outer Join0 to 1 + condition expressions: + 0 {key} {val} + 1 {key} {val} + keys: + 0 key (type: int) + 1 key (type: int) + outputColumnNames: _col0, _col1, _col5, _col6 + Statistics: Num rows: 6 Data size: 26 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string) + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 6 Data size: 26 Basic stats: COMPLETE Column stats: NONE + Union + Statistics: Num rows: 6 Data size: 26 Basic stats: COMPLETE Column stats: NONE + Select Operator + SELECT * : (no compute) + Statistics: Num rows: 6 Data size: 26 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 6 Data size: 26 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Local Work: + Map Reduce Local Work + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT a.*, b.* FROM T1 a RIGHT OUTER JOIN T2 b ON a.key = b.key +ORDER BY a.key, b.key, a.val, b.val +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t2 +#### A masked pattern was here #### +POSTHOOK: query: SELECT a.*, b.* FROM T1 a RIGHT OUTER JOIN T2 b ON a.key = b.key +ORDER BY a.key, b.key, a.val, b.val +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t2 +#### A masked pattern was here #### +NULL NULL 4 14 +NULL NULL 5 15 +2 12 2 22 +3 13 3 13 +8 18 8 18 +8 18 8 18 +8 28 8 18 +8 28 8 18 +PREHOOK: query: -- an aggregation at the end should not change anything + +EXPLAIN +SELECT count(1) FROM T1 a JOIN T2 b ON a.key = b.key +PREHOOK: type: QUERY +POSTHOOK: query: -- an aggregation at the end should not change anything + +EXPLAIN +SELECT count(1) FROM T1 a JOIN T2 b ON a.key = b.key +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-8 is a root stage + Stage-2 depends on stages: Stage-8 + Stage-0 depends on stages: Stage-2 + +STAGE PLANS: + Stage: Stage-8 + Map Reduce Local Work + Alias -> Map Local Tables: + a + Fetch Operator + limit: -1 + subquery1:a + Fetch Operator + 
limit: -1 + Alias -> Map Local Operator Tree: + a + TableScan + alias: a + Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (key is not null and ((key = 2) or (key = 3))) (type: boolean) + Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: NONE + HashTable Sink Operator + condition expressions: + 0 + 1 + keys: + 0 key (type: int) + 1 key (type: int) + subquery1:a + TableScan + alias: a + Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (key is not null and (not ((key = 2) or (key = 3)))) (type: boolean) + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE + HashTable Sink Operator + condition expressions: + 0 + 1 + keys: + 0 key (type: int) + 1 key (type: int) + + Stage: Stage-2 + Map Reduce + Map Operator Tree: + TableScan + alias: b + Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (key is not null and (not ((key = 2) or (key = 3)))) (type: boolean) + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 + 1 + keys: + 0 key (type: int) + 1 key (type: int) + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE + Select Operator + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE + Union + Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE + Select Operator + SELECT * : (no compute) + Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count(1) + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + TableScan + alias: b + Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (key is not null and ((key = 2) or (key = 3))) (type: boolean) + Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 + 1 + keys: + 0 key (type: int) + 1 key (type: int) + Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Select Operator + Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Union + Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE + Select Operator + SELECT * : (no compute) + Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count(1) + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Local Work: + Map Reduce Local Work + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE 
Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT count(1) FROM T1 a JOIN T2 b ON a.key = b.key +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t2 +#### A masked pattern was here #### +POSTHOOK: query: SELECT count(1) FROM T1 a JOIN T2 b ON a.key = b.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t2 +#### A masked pattern was here #### +6 +PREHOOK: query: EXPLAIN +SELECT count(1) FROM T1 a RIGHT OUTER JOIN T2 b ON a.key = b.key +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN +SELECT count(1) FROM T1 a RIGHT OUTER JOIN T2 b ON a.key = b.key +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-8 is a root stage + Stage-2 depends on stages: Stage-8 + Stage-0 depends on stages: Stage-2 + +STAGE PLANS: + Stage: Stage-8 + Map Reduce Local Work + Alias -> Map Local Tables: + a + Fetch Operator + limit: -1 + subquery1:a + Fetch Operator + limit: -1 + Alias -> Map Local Operator Tree: + a + TableScan + alias: a + Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: ((key = 2) or (key = 3)) (type: boolean) + Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE + HashTable Sink Operator + condition expressions: + 0 + 1 + keys: + 0 key (type: int) + 1 key (type: int) + subquery1:a + TableScan + alias: a + Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (not ((key = 2) or (key = 3))) (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + HashTable Sink Operator + condition expressions: + 0 + 1 + keys: + 0 key (type: int) + 1 key (type: int) + + Stage: Stage-2 + Map Reduce + Map Operator Tree: + TableScan + alias: b + Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (not ((key = 2) or (key = 3))) (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Map Join Operator + condition map: + Right Outer Join0 to 1 + condition expressions: + 0 + 1 + keys: + 0 key (type: int) + 1 key (type: int) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Select Operator + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Union + Statistics: Num rows: 6 Data size: 26 Basic stats: COMPLETE Column stats: NONE + Select Operator + SELECT * : (no compute) + Statistics: Num rows: 6 Data size: 26 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count(1) + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + TableScan + alias: b + Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: ((key = 2) or (key = 3)) (type: boolean) + Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + 
Right Outer Join0 to 1 + condition expressions: + 0 + 1 + keys: + 0 key (type: int) + 1 key (type: int) + Statistics: Num rows: 6 Data size: 26 Basic stats: COMPLETE Column stats: NONE + Select Operator + Statistics: Num rows: 6 Data size: 26 Basic stats: COMPLETE Column stats: NONE + Union + Statistics: Num rows: 6 Data size: 26 Basic stats: COMPLETE Column stats: NONE + Select Operator + SELECT * : (no compute) + Statistics: Num rows: 6 Data size: 26 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count(1) + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Local Work: + Map Reduce Local Work + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT count(1) FROM T1 a RIGHT OUTER JOIN T2 b ON a.key = b.key +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t2 +#### A masked pattern was here #### +POSTHOOK: query: SELECT count(1) FROM T1 a RIGHT OUTER JOIN T2 b ON a.key = b.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t2 +#### A masked pattern was here #### +8 Index: ql/src/test/results/clientpositive/skewjoin_mapjoin11.q.out =================================================================== --- ql/src/test/results/clientpositive/skewjoin_mapjoin11.q.out (revision 0) +++ ql/src/test/results/clientpositive/skewjoin_mapjoin11.q.out (working copy) @@ -0,0 +1,197 @@ +PREHOOK: query: CREATE TABLE T1(key STRING, val STRING) +CLUSTERED BY (key) INTO 4 BUCKETS +SKEWED BY (key) ON ((2)) STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@T1 +POSTHOOK: query: CREATE TABLE T1(key STRING, val STRING) +CLUSTERED BY (key) INTO 4 BUCKETS +SKEWED BY (key) ON ((2)) STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@T1 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1 +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@t1 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1 +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@t1 +PREHOOK: query: CREATE TABLE T2(key STRING, val STRING) STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@T2 +POSTHOOK: query: CREATE TABLE T2(key STRING, val STRING) STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@T2 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO 
TABLE T2 +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@t2 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2 +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@t2 +PREHOOK: query: -- copy from skewjoinopt19 +-- test compile time skew join and auto map join +-- add a test where the skewed key is also the bucketized key +-- it should not matter, and the compile time skewed join +-- optimization is performed +-- adding an order by at the end to make the results deterministic + +EXPLAIN +SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key +PREHOOK: type: QUERY +POSTHOOK: query: -- copy from skewjoinopt19 +-- test compile time skew join and auto map join +-- add a test where the skewed key is also the bucketized key +-- it should not matter, and the compile time skewed join +-- optimization is performed +-- adding an order by at the end to make the results deterministic + +EXPLAIN +SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-8 is a root stage + Stage-2 depends on stages: Stage-8 + Stage-0 depends on stages: Stage-2 + +STAGE PLANS: + Stage: Stage-8 + Map Reduce Local Work + Alias -> Map Local Tables: + a + Fetch Operator + limit: -1 + subquery1:a + Fetch Operator + limit: -1 + Alias -> Map Local Operator Tree: + a + TableScan + alias: a + Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Filter Operator + predicate: (key is not null and (key = '2')) (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + HashTable Sink Operator + condition expressions: + 0 {val} + 1 {key} {val} + keys: + 0 key (type: string) + 1 key (type: string) + subquery1:a + TableScan + alias: a + Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Filter Operator + predicate: (key is not null and (not (key = '2'))) (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + HashTable Sink Operator + condition expressions: + 0 {val} + 1 {key} {val} + keys: + 0 key (type: string) + 1 key (type: string) + + Stage: Stage-2 + Map Reduce + Map Operator Tree: + TableScan + alias: b + Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Filter Operator + predicate: (key is not null and (not (key = '2'))) (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 {key} {val} + 1 {key} {val} + keys: + 0 key (type: string) + 1 key (type: string) + outputColumnNames: _col0, _col1, _col5, _col6 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Select Operator + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string) + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Union + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Select Operator + SELECT * : (no compute) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + TableScan + alias: b + Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Filter Operator + predicate: (key is not null and (key = '2')) (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 {key} {val} + 1 {key} {val} + keys: + 0 key (type: string) + 1 key (type: string) + outputColumnNames: _col0, _col1, _col5, _col6 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Select Operator + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string) + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Union + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Select Operator + SELECT * : (no compute) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Local Work: + Map Reduce Local Work + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key +ORDER BY a.key, b.key, a.val, b.val +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t2 +#### A masked pattern was here #### +POSTHOOK: query: SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key +ORDER BY a.key, b.key, a.val, b.val +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t2 +#### A masked pattern was here #### +2 12 2 22 +3 13 3 13 +8 18 8 18 +8 18 8 18 +8 28 8 18 +8 28 8 18 Index: ql/src/test/results/clientpositive/skewjoin_mapjoin2.q.out =================================================================== --- ql/src/test/results/clientpositive/skewjoin_mapjoin2.q.out (revision 0) +++ ql/src/test/results/clientpositive/skewjoin_mapjoin2.q.out (working copy) @@ -0,0 +1,366 @@ +PREHOOK: query: CREATE TABLE T1(key STRING, val STRING) +SKEWED BY (key) ON ((2), (8)) STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@T1 +POSTHOOK: query: CREATE TABLE T1(key STRING, val STRING) +SKEWED BY (key) ON ((2), (8)) STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@T1 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1 +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@t1 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1 +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@t1 +PREHOOK: query: CREATE TABLE T2(key STRING, val STRING) +SKEWED BY (key) ON ((3), (8)) STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@T2 +POSTHOOK: query: CREATE TABLE T2(key STRING, val STRING) +SKEWED BY (key) ON ((3), (8)) STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@T2 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2 +PREHOOK: type: LOAD 
+#### A masked pattern was here #### +PREHOOK: Output: default@t2 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2 +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@t2 +PREHOOK: query: -- copy from skewjoinopt3 +-- test compile time skew join and auto map join +-- a simple query with skew on both the tables. One of the skewed +-- values is common to both the tables. The skewed value should not be +-- repeated in the filter. +-- adding an order by at the end to make the results deterministic + +EXPLAIN +SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key +PREHOOK: type: QUERY +POSTHOOK: query: -- copy from skewjoinopt3 +-- test compile time skew join and auto map join +-- a simple query with skew on both the tables. One of the skewed +-- values is common to both the tables. The skewed value should not be +-- repeated in the filter. +-- adding an order by at the end to make the results deterministic + +EXPLAIN +SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-8 is a root stage + Stage-2 depends on stages: Stage-8 + Stage-0 depends on stages: Stage-2 + +STAGE PLANS: + Stage: Stage-8 + Map Reduce Local Work + Alias -> Map Local Tables: + a + Fetch Operator + limit: -1 + subquery1:a + Fetch Operator + limit: -1 + Alias -> Map Local Operator Tree: + a + TableScan + alias: a + Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Filter Operator + predicate: (key is not null and (((key = '2') or (key = '8')) or (key = '3'))) (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + HashTable Sink Operator + condition expressions: + 0 {val} + 1 {key} {val} + keys: + 0 key (type: string) + 1 key (type: string) + subquery1:a + TableScan + alias: a + Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Filter Operator + predicate: (key is not null and (not (((key = '2') or (key = '8')) or (key = '3')))) (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + HashTable Sink Operator + condition expressions: + 0 {val} + 1 {key} {val} + keys: + 0 key (type: string) + 1 key (type: string) + + Stage: Stage-2 + Map Reduce + Map Operator Tree: + TableScan + alias: b + Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Filter Operator + predicate: (key is not null and (not (((key = '2') or (key = '8')) or (key = '3')))) (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 {key} {val} + 1 {key} {val} + keys: + 0 key (type: string) + 1 key (type: string) + outputColumnNames: _col0, _col1, _col5, _col6 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Select Operator + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string) + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Union + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Select Operator + SELECT * : (no compute) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + TableScan + alias: b + Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Filter Operator + predicate: (key is not null and (((key = '2') or (key = '8')) or (key = '3'))) (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 {key} {val} + 1 {key} {val} + keys: + 0 key (type: string) + 1 key (type: string) + outputColumnNames: _col0, _col1, _col5, _col6 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Select Operator + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string) + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Union + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Select Operator + SELECT * : (no compute) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Local Work: + Map Reduce Local Work + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key +ORDER BY a.key, b.key, a.val, b.val +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t2 +#### A masked pattern was here #### +POSTHOOK: query: SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key +ORDER BY a.key, b.key, a.val, b.val +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t2 +#### A masked pattern was here #### +2 12 2 22 +3 13 3 13 +8 18 8 18 +8 18 8 18 +8 28 8 18 +8 28 8 18 +PREHOOK: query: -- test outer joins also + +EXPLAIN +SELECT a.*, b.* FROM T1 a FULL OUTER JOIN T2 b ON a.key = b.key +PREHOOK: type: QUERY +POSTHOOK: query: -- test outer joins also + +EXPLAIN +SELECT a.*, b.* FROM T1 a FULL OUTER JOIN T2 b ON a.key = b.key +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-2 depends on stages: Stage-1, Stage-4 + Stage-4 is a root stage + Stage-0 depends on stages: Stage-2 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: a + Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Filter Operator + predicate: (not (((key = '2') or (key = '8')) or (key = '3'))) (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Reduce Output Operator + key expressions: key (type: string) + sort order: + + Map-reduce partition columns: key (type: string) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + value expressions: val (type: string) + TableScan + alias: b + Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Filter Operator + predicate: (not (((key = '2') or (key = '8')) or (key = '3'))) (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Reduce Output Operator + key expressions: key (type: string) + sort order: + + Map-reduce partition columns: key (type: string) + 
Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + value expressions: val (type: string) + Reduce Operator Tree: + Join Operator + condition map: + Outer Join 0 to 1 + condition expressions: + 0 {KEY.reducesinkkey0} {VALUE._col0} + 1 {KEY.reducesinkkey0} {VALUE._col0} + outputColumnNames: _col0, _col1, _col5, _col6 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Select Operator + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string) + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe + + Stage: Stage-2 + Map Reduce + Map Operator Tree: + TableScan + Union + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Select Operator + SELECT * : (no compute) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + TableScan + Union + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Select Operator + SELECT * : (no compute) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-4 + Map Reduce + Map Operator Tree: + TableScan + alias: b + Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Filter Operator + predicate: (((key = '2') or (key = '8')) or (key = '3')) (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Reduce Output Operator + key expressions: key (type: string) + sort order: + + Map-reduce partition columns: key (type: string) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + value expressions: val (type: string) + TableScan + alias: a + Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Filter Operator + predicate: (((key = '2') or (key = '8')) or (key = '3')) (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Reduce Output Operator + key expressions: key (type: string) + sort order: + + Map-reduce partition columns: key (type: string) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + value expressions: val (type: string) + Reduce Operator Tree: + Join Operator + condition map: + Outer Join 0 to 1 + condition expressions: + 0 {KEY.reducesinkkey0} {VALUE._col0} + 1 {KEY.reducesinkkey0} {VALUE._col0} + outputColumnNames: _col0, _col1, _col5, _col6 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Select Operator + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: 
string) + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT a.*, b.* FROM T1 a FULL OUTER JOIN T2 b ON a.key = b.key +ORDER BY a.key, b.key, a.val, b.val +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t2 +#### A masked pattern was here #### +POSTHOOK: query: SELECT a.*, b.* FROM T1 a FULL OUTER JOIN T2 b ON a.key = b.key +ORDER BY a.key, b.key, a.val, b.val +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t2 +#### A masked pattern was here #### +NULL NULL 4 14 +NULL NULL 5 15 +1 11 NULL NULL +2 12 2 22 +3 13 3 13 +7 17 NULL NULL +8 18 8 18 +8 18 8 18 +8 28 8 18 +8 28 8 18 Index: ql/src/test/results/clientpositive/skewjoin_mapjoin3.q.out =================================================================== --- ql/src/test/results/clientpositive/skewjoin_mapjoin3.q.out (revision 0) +++ ql/src/test/results/clientpositive/skewjoin_mapjoin3.q.out (working copy) @@ -0,0 +1,197 @@ +PREHOOK: query: CREATE TABLE T1(key STRING, val STRING) +SKEWED BY (key, val) ON ((2, 12), (8, 18)) STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@T1 +POSTHOOK: query: CREATE TABLE T1(key STRING, val STRING) +SKEWED BY (key, val) ON ((2, 12), (8, 18)) STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@T1 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1 +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@t1 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1 +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@t1 +PREHOOK: query: CREATE TABLE T2(key STRING, val STRING) +SKEWED BY (key, val) ON ((3, 13), (8, 18)) STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@T2 +POSTHOOK: query: CREATE TABLE T2(key STRING, val STRING) +SKEWED BY (key, val) ON ((3, 13), (8, 18)) STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@T2 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2 +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@t2 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2 +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@t2 +PREHOOK: query: -- copy from skewjoinopt6 +-- test compile time skew join and auto map join +-- Both the join tables are skewed by 2 keys, and one of the skewed values +-- is common to both the tables. 
The join key is a subset of the skewed key set: +-- it only contains the first skewed key for both the tables +-- adding an order by at the end to make the results deterministic + +EXPLAIN +SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key +PREHOOK: type: QUERY +POSTHOOK: query: -- copy from skewjoinopt6 +-- test compile time skew join and auto map join +-- Both the join tables are skewed by 2 keys, and one of the skewed values +-- is common to both the tables. The join key is a subset of the skewed key set: +-- it only contains the first skewed key for both the tables +-- adding an order by at the end to make the results deterministic + +EXPLAIN +SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-8 is a root stage + Stage-2 depends on stages: Stage-8 + Stage-0 depends on stages: Stage-2 + +STAGE PLANS: + Stage: Stage-8 + Map Reduce Local Work + Alias -> Map Local Tables: + a + Fetch Operator + limit: -1 + subquery1:a + Fetch Operator + limit: -1 + Alias -> Map Local Operator Tree: + a + TableScan + alias: a + Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Filter Operator + predicate: (key is not null and (((key = '2') or (key = '8')) or (key = '3'))) (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + HashTable Sink Operator + condition expressions: + 0 {val} + 1 {key} {val} + keys: + 0 key (type: string) + 1 key (type: string) + subquery1:a + TableScan + alias: a + Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Filter Operator + predicate: (key is not null and (not (((key = '2') or (key = '8')) or (key = '3')))) (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + HashTable Sink Operator + condition expressions: + 0 {val} + 1 {key} {val} + keys: + 0 key (type: string) + 1 key (type: string) + + Stage: Stage-2 + Map Reduce + Map Operator Tree: + TableScan + alias: b + Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Filter Operator + predicate: (key is not null and (not (((key = '2') or (key = '8')) or (key = '3')))) (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 {key} {val} + 1 {key} {val} + keys: + 0 key (type: string) + 1 key (type: string) + outputColumnNames: _col0, _col1, _col5, _col6 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Select Operator + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string) + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Union + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Select Operator + SELECT * : (no compute) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + TableScan + alias: b + Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Filter Operator + predicate: (key is not null and (((key = '2') or (key = '8')) or (key = '3'))) (type: 
boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 {key} {val} + 1 {key} {val} + keys: + 0 key (type: string) + 1 key (type: string) + outputColumnNames: _col0, _col1, _col5, _col6 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Select Operator + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string) + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Union + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Select Operator + SELECT * : (no compute) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Local Work: + Map Reduce Local Work + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key +ORDER BY a.key, b.key, a.val, b.val +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t2 +#### A masked pattern was here #### +POSTHOOK: query: SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key +ORDER BY a.key, b.key, a.val, b.val +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t2 +#### A masked pattern was here #### +2 12 2 22 +3 13 3 13 +8 18 8 18 +8 18 8 18 +8 28 8 18 +8 28 8 18 Index: ql/src/test/results/clientpositive/skewjoin_mapjoin4.q.out =================================================================== --- ql/src/test/results/clientpositive/skewjoin_mapjoin4.q.out (revision 0) +++ ql/src/test/results/clientpositive/skewjoin_mapjoin4.q.out (working copy) @@ -0,0 +1,258 @@ +PREHOOK: query: CREATE TABLE T1(key STRING, val STRING) +SKEWED BY (key) ON ((2), (8)) STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@T1 +POSTHOOK: query: CREATE TABLE T1(key STRING, val STRING) +SKEWED BY (key) ON ((2), (8)) STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@T1 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1 +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@t1 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1 +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@t1 +PREHOOK: query: CREATE TABLE T2(key STRING, val STRING) +SKEWED BY (key) ON ((3), (8)) STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@T2 +POSTHOOK: query: CREATE TABLE T2(key STRING, val STRING) +SKEWED BY (key) ON ((3), (8)) STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@T2 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2 +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@t2 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2 +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: 
default@t2 +PREHOOK: query: CREATE TABLE T3(key STRING, val STRING) STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@T3 +POSTHOOK: query: CREATE TABLE T3(key STRING, val STRING) STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@T3 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3 +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@t3 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3 +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@t3 +PREHOOK: query: -- copy from skewjoinopt7 +-- test compile time skew join and auto map join +-- This test is for validating skewed join compile time optimization for more than +-- 2 tables. The join key is the same, and so a 3-way join would be performed. +-- 2 of the 3 tables are skewed on the join key +-- adding a order by at the end to make the results deterministic + +EXPLAIN +SELECT a.*, b.*, c.* FROM T1 a JOIN T2 b ON a.key = b.key JOIN T3 c on a.key = c.key +PREHOOK: type: QUERY +POSTHOOK: query: -- copy from skewjoinopt7 +-- test compile time skew join and auto map join +-- This test is for validating skewed join compile time optimization for more than +-- 2 tables. The join key is the same, and so a 3-way join would be performed. +-- 2 of the 3 tables are skewed on the join key +-- adding a order by at the end to make the results deterministic + +EXPLAIN +SELECT a.*, b.*, c.* FROM T1 a JOIN T2 b ON a.key = b.key JOIN T3 c on a.key = c.key +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-10 is a root stage + Stage-2 depends on stages: Stage-10 + Stage-0 depends on stages: Stage-2 + +STAGE PLANS: + Stage: Stage-10 + Map Reduce Local Work + Alias -> Map Local Tables: + a + Fetch Operator + limit: -1 + c + Fetch Operator + limit: -1 + subquery1:a + Fetch Operator + limit: -1 + subquery1:c + Fetch Operator + limit: -1 + Alias -> Map Local Operator Tree: + a + TableScan + alias: a + Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Filter Operator + predicate: (key is not null and (((key = '2') or (key = '8')) or (key = '3'))) (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + HashTable Sink Operator + condition expressions: + 0 {val} + 1 {key} {val} + 2 {val} + keys: + 0 key (type: string) + 1 key (type: string) + 2 key (type: string) + c + TableScan + alias: c + Statistics: Num rows: 0 Data size: 20 Basic stats: PARTIAL Column stats: NONE + Filter Operator + predicate: (key is not null and (((key = '2') or (key = '8')) or (key = '3'))) (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + HashTable Sink Operator + condition expressions: + 0 {val} + 1 {key} {val} + 2 {val} + keys: + 0 key (type: string) + 1 key (type: string) + 2 key (type: string) + subquery1:a + TableScan + alias: a + Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Filter Operator + predicate: (key is not null and (not (((key = '2') or (key = '8')) or (key = '3')))) (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + HashTable Sink Operator + condition expressions: + 0 {val} + 1 {key} {val} + 2 {val} + keys: + 0 key (type: string) + 1 key (type: string) + 2 key (type: string) + subquery1:c + TableScan + alias: c + Statistics: Num rows: 0 Data size: 20 
Basic stats: PARTIAL Column stats: NONE + Filter Operator + predicate: (key is not null and (not (((key = '2') or (key = '8')) or (key = '3')))) (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + HashTable Sink Operator + condition expressions: + 0 {val} + 1 {key} {val} + 2 {val} + keys: + 0 key (type: string) + 1 key (type: string) + 2 key (type: string) + + Stage: Stage-2 + Map Reduce + Map Operator Tree: + TableScan + alias: b + Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Filter Operator + predicate: (key is not null and (not (((key = '2') or (key = '8')) or (key = '3')))) (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + Inner Join 0 to 2 + condition expressions: + 0 {key} {val} + 1 {key} {val} + 2 {key} {val} + keys: + 0 key (type: string) + 1 key (type: string) + 2 key (type: string) + outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Select Operator + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string), _col10 (type: string), _col11 (type: string) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Union + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Select Operator + SELECT * : (no compute) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + TableScan + alias: b + Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Filter Operator + predicate: (key is not null and (((key = '2') or (key = '8')) or (key = '3'))) (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + Inner Join 0 to 2 + condition expressions: + 0 {key} {val} + 1 {key} {val} + 2 {key} {val} + keys: + 0 key (type: string) + 1 key (type: string) + 2 key (type: string) + outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Select Operator + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string), _col10 (type: string), _col11 (type: string) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Union + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Select Operator + SELECT * : (no compute) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Local Work: + Map Reduce Local Work + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink 
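For reference, a minimal HiveQL sketch of the rewrite the plans above reflect, shown for the two-table case. It assumes the corresponding .q files (not included in this diff) enable compile-time skew join and map-join conversion; the UNION ALL form illustrates the optimizer's split, not the literal operator tree it emits:

    SET hive.optimize.skewjoin.compiletime = true;  -- use SKEWED BY metadata at plan time
    SET hive.auto.convert.join = true;              -- convert each branch to a map join

    -- T1 is SKEWED BY (key) ON ((2),(8)) and T2 ON ((3),(8)), so the union of
    -- skewed values {'2','8','3'} drives the Filter Operator predicates above:
    SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key
    WHERE a.key IN ('2','8','3')       -- skewed rows: one map join
    UNION ALL
    SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key
    WHERE a.key NOT IN ('2','8','3');  -- remaining rows: a second map join

Each branch builds its own hash table (the HashTable Sink Operators in the local-work stage), and a Union operator merges the two map-join outputs, which is the Stage-10/Stage-2 shape shown here.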
+ +PREHOOK: query: SELECT a.*, b.*, c.* FROM T1 a JOIN T2 b ON a.key = b.key JOIN T3 c on a.key = c.key +ORDER BY a.key, b.key, c.key, a.val, b.val, c.val +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t3 +#### A masked pattern was here #### +POSTHOOK: query: SELECT a.*, b.*, c.* FROM T1 a JOIN T2 b ON a.key = b.key JOIN T3 c on a.key = c.key +ORDER BY a.key, b.key, c.key, a.val, b.val, c.val +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t3 +#### A masked pattern was here #### +2 12 2 22 2 12 Index: ql/src/test/results/clientpositive/skewjoin_mapjoin5.q.out =================================================================== --- ql/src/test/results/clientpositive/skewjoin_mapjoin5.q.out (revision 0) +++ ql/src/test/results/clientpositive/skewjoin_mapjoin5.q.out (working copy) @@ -0,0 +1,355 @@ +PREHOOK: query: CREATE TABLE T1(key STRING, val STRING) +SKEWED BY (key) ON ((2)) STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@T1 +POSTHOOK: query: CREATE TABLE T1(key STRING, val STRING) +SKEWED BY (key) ON ((2)) STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@T1 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1 +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@t1 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1 +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@t1 +PREHOOK: query: CREATE TABLE T2(key STRING, val STRING) STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@T2 +POSTHOOK: query: CREATE TABLE T2(key STRING, val STRING) STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@T2 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2 +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@t2 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2 +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@t2 +PREHOOK: query: -- copy from skewjoinopt9 +-- test compile time skew join and auto map join +-- no skew join compile time optimization would be performed if one of the +-- join sources is a sub-query consisting of a union all +-- adding a order by at the end to make the results deterministic +EXPLAIN +select * from +( +select key, val from T1 + union all +select key, val from T1 +) subq1 +join T2 b on subq1.key = b.key +PREHOOK: type: QUERY +POSTHOOK: query: -- copy from skewjoinopt9 +-- test compile time skew join and auto map join +-- no skew join compile time optimization would be performed if one of the +-- join sources is a sub-query consisting of a union all +-- adding a order by at the end to make the results deterministic +EXPLAIN +select * from +( +select key, val from T1 + union all +select key, val from T1 +) subq1 +join T2 b on subq1.key = b.key +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-5 is a root stage + Stage-4 depends on stages: Stage-5 + Stage-0 depends on stages: Stage-4 + +STAGE PLANS: + Stage: Stage-5 + Map Reduce Local Work + Alias -> Map Local Tables: + b + Fetch Operator + limit: -1 + Alias -> Map Local Operator Tree: + b + TableScan + alias: b + Statistics: Num rows: 0 Data 
size: 30 Basic stats: PARTIAL Column stats: NONE + Filter Operator + predicate: key is not null (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + HashTable Sink Operator + condition expressions: + 0 {_col0} {_col1} + 1 {val} + keys: + 0 _col0 (type: string) + 1 key (type: string) + + Stage: Stage-4 + Map Reduce + Map Operator Tree: + TableScan + alias: t1 + Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Filter Operator + predicate: key is not null (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Select Operator + expressions: key (type: string), val (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Union + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 {_col0} {_col1} + 1 {key} {val} + keys: + 0 _col0 (type: string) + 1 key (type: string) + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Select Operator + expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: string) + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + TableScan + alias: t1 + Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Filter Operator + predicate: key is not null (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Select Operator + expressions: key (type: string), val (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Union + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 {_col0} {_col1} + 1 {key} {val} + keys: + 0 _col0 (type: string) + 1 key (type: string) + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Select Operator + expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: string) + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Local Work: + Map Reduce Local Work + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select * from +( +select key, val from T1 + union all +select key, val from T1 +) subq1 +join T2 b on subq1.key = b.key +ORDER BY subq1.key, b.key, subq1.val, b.val +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t2 +#### A masked pattern was here #### +POSTHOOK: query: 
select * from +( +select key, val from T1 + union all +select key, val from T1 +) subq1 +join T2 b on subq1.key = b.key +ORDER BY subq1.key, b.key, subq1.val, b.val +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t2 +#### A masked pattern was here #### +2 12 2 22 +2 12 2 22 +3 13 3 13 +3 13 3 13 +8 18 8 18 +8 18 8 18 +8 18 8 18 +8 18 8 18 +8 28 8 18 +8 28 8 18 +8 28 8 18 +8 28 8 18 +PREHOOK: query: -- no skew join compile time optimization would be performed if one of the +-- join sources is a sub-query consisting of a group by +EXPLAIN +select * from +( +select key, count(1) as cnt from T1 group by key +) subq1 +join T2 b on subq1.key = b.key +PREHOOK: type: QUERY +POSTHOOK: query: -- no skew join compile time optimization would be performed if one of the +-- join sources is a sub-query consisting of a group by +EXPLAIN +select * from +( +select key, count(1) as cnt from T1 group by key +) subq1 +join T2 b on subq1.key = b.key +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-5 depends on stages: Stage-1 + Stage-4 depends on stages: Stage-5 + Stage-0 depends on stages: Stage-4 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: t1 + Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Filter Operator + predicate: key is not null (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Select Operator + expressions: key (type: string) + outputColumnNames: key + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Group By Operator + aggregations: count(1) + keys: key (type: string) + mode: hash + outputColumnNames: _col0, _col1 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: string) + sort order: + + Map-reduce partition columns: _col0 (type: string) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + value expressions: _col1 (type: bigint) + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + keys: KEY._col0 (type: string) + mode: mergepartial + outputColumnNames: _col0, _col1 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Select Operator + expressions: _col0 (type: string), _col1 (type: bigint) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe + + Stage: Stage-5 + Map Reduce Local Work + Alias -> Map Local Tables: + b + Fetch Operator + limit: -1 + Alias -> Map Local Operator Tree: + b + TableScan + alias: b + Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Filter Operator + predicate: key is not null (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + HashTable Sink Operator + condition expressions: + 0 {_col0} {_col1} + 1 {val} + keys: + 0 _col0 (type: string) + 1 key (type: string) + + Stage: Stage-4 + Map Reduce + Map Operator Tree: + TableScan + Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 {_col0} {_col1} + 1 {key} {val} + keys: + 0 _col0 (type: string) + 1 key (type: string) + outputColumnNames: _col0, _col1, _col2, 
_col3 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Select Operator + expressions: _col0 (type: string), _col1 (type: bigint), _col2 (type: string), _col3 (type: string) + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Local Work: + Map Reduce Local Work + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select * from +( +select key, count(1) as cnt from T1 group by key +) subq1 +join T2 b on subq1.key = b.key +ORDER BY subq1.key, b.key, subq1.cnt, b.val +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t2 +#### A masked pattern was here #### +POSTHOOK: query: select * from +( +select key, count(1) as cnt from T1 group by key +) subq1 +join T2 b on subq1.key = b.key +ORDER BY subq1.key, b.key, subq1.cnt, b.val +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t2 +#### A masked pattern was here #### +2 1 2 22 +3 1 3 13 +8 2 8 18 +8 2 8 18 Index: ql/src/test/results/clientpositive/skewjoin_mapjoin6.q.out =================================================================== --- ql/src/test/results/clientpositive/skewjoin_mapjoin6.q.out (revision 0) +++ ql/src/test/results/clientpositive/skewjoin_mapjoin6.q.out (working copy) @@ -0,0 +1,267 @@ +PREHOOK: query: CREATE TABLE T1(key STRING, value STRING) STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@T1 +POSTHOOK: query: CREATE TABLE T1(key STRING, value STRING) STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@T1 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1 +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@t1 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1 +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@t1 +PREHOOK: query: drop table array_valued_T1 +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table array_valued_T1 +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table array_valued_T1 (key string, value array<string>) SKEWED BY (key) ON ((8)) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@array_valued_T1 +POSTHOOK: query: create table array_valued_T1 (key string, value array<string>) SKEWED BY (key) ON ((8)) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@array_valued_T1 +PREHOOK: query: insert overwrite table array_valued_T1 select key, array(value) from T1 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Output: default@array_valued_t1 +POSTHOOK: query: insert overwrite table array_valued_T1 select key, array(value) from T1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Output: default@array_valued_t1 +POSTHOOK: Lineage: array_valued_t1.key SIMPLE [(t1)t1.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: array_valued_t1.value EXPRESSION [(t1)t1.FieldSchema(name:value, type:string, comment:null), ] +PREHOOK: query: -- copy from skewjoinopt10 +--
test compile time skew join and auto map join +-- This test is to verify the skew join compile optimization when the join is followed by a lateral view +-- adding a order by at the end to make the results deterministic + +explain +select * from (select a.key as key, b.value as array_val from T1 a join array_valued_T1 b on a.key=b.key) i lateral view explode (array_val) c as val +PREHOOK: type: QUERY +POSTHOOK: query: -- copy from skewjoinopt10 +-- test compile time skew join and auto map join +-- This test is to verify the skew join compile optimization when the join is followed by a lateral view +-- adding a order by at the end to make the results deterministic + +explain +select * from (select a.key as key, b.value as array_val from T1 a join array_valued_T1 b on a.key=b.key) i lateral view explode (array_val) c as val +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-8 is a root stage + Stage-2 depends on stages: Stage-8 + Stage-0 depends on stages: Stage-2 + +STAGE PLANS: + Stage: Stage-8 + Map Reduce Local Work + Alias -> Map Local Tables: + i:a + Fetch Operator + limit: -1 + subquery1:a + Fetch Operator + limit: -1 + Alias -> Map Local Operator Tree: + i:a + TableScan + alias: a + Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Filter Operator + predicate: (key is not null and (key = '8')) (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + HashTable Sink Operator + condition expressions: + 0 + 1 {value} + keys: + 0 key (type: string) + 1 key (type: string) + subquery1:a + TableScan + alias: a + Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Filter Operator + predicate: (key is not null and (not (key = '8'))) (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + HashTable Sink Operator + condition expressions: + 0 + 1 {value} + keys: + 0 key (type: string) + 1 key (type: string) + + Stage: Stage-2 + Map Reduce + Map Operator Tree: + TableScan + alias: b + Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (key is not null and (not (key = '8'))) (type: boolean) + Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 {key} + 1 {value} + keys: + 0 key (type: string) + 1 key (type: string) + outputColumnNames: _col0, _col6 + Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: string), _col6 (type: array<string>) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Union + Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE + Select Operator + SELECT * : (no compute) + Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE + Lateral View Forward + Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE + Select Operator + SELECT * : (no compute) + expressions: _col0 (type: string), _col1 (type: array<string>) + outputColumnNames: org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc, org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc + Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE + Lateral View Join Operator + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE + Select Operator +
expressions: _col0 (type: string), _col1 (type: array<string>), _col2 (type: string) + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Select Operator + expressions: _col1 (type: array<string>) + outputColumnNames: _col0 + Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE + UDTF Operator + Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE + function name: explode + Lateral View Join Operator + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: string), _col1 (type: array<string>), _col2 (type: string) + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + TableScan + alias: b + Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (key is not null and (key = '8')) (type: boolean) + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 {key} + 1 {value} + keys: + 0 key (type: string) + 1 key (type: string) + outputColumnNames: _col0, _col6 + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: string), _col6 (type: array<string>) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE + Union + Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE + Select Operator + SELECT * : (no compute) + Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE + Lateral View Forward + Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE + Select Operator + SELECT * : (no compute) + expressions: _col0 (type: string), _col1 (type: array<string>) + outputColumnNames: org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc, org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc + Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE + Lateral View Join Operator + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: string), _col1 (type: array<string>), _col2 (type: string) + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +
Select Operator + expressions: _col1 (type: array<string>) + outputColumnNames: _col0 + Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE + UDTF Operator + Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE + function name: explode + Lateral View Join Operator + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: string), _col1 (type: array<string>), _col2 (type: string) + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Local Work: + Map Reduce Local Work + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select * from (select a.key as key, b.value as array_val from T1 a join array_valued_T1 b on a.key=b.key) i lateral view explode (array_val) c as val +ORDER BY key, val +PREHOOK: type: QUERY +PREHOOK: Input: default@array_valued_t1 +PREHOOK: Input: default@t1 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select a.key as key, b.value as array_val from T1 a join array_valued_T1 b on a.key=b.key) i lateral view explode (array_val) c as val +ORDER BY key, val +POSTHOOK: type: QUERY +POSTHOOK: Input: default@array_valued_t1 +POSTHOOK: Input: default@t1 +#### A masked pattern was here #### +1 ["11"] 11 +2 ["12"] 12 +3 ["13"] 13 +7 ["17"] 17 +8 ["18"] 18 +8 ["18"] 18 +8 ["28"] 28 +8 ["28"] 28 Index: ql/src/test/results/clientpositive/skewjoin_mapjoin7.q.out =================================================================== --- ql/src/test/results/clientpositive/skewjoin_mapjoin7.q.out (revision 0) +++ ql/src/test/results/clientpositive/skewjoin_mapjoin7.q.out (working copy) @@ -0,0 +1,363 @@ +PREHOOK: query: CREATE TABLE T1(key STRING, val STRING) +SKEWED BY (key) ON ((2)) STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@T1 +POSTHOOK: query: CREATE TABLE T1(key STRING, val STRING) +SKEWED BY (key) ON ((2)) STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@T1 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1 +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@t1 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1 +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@t1 +PREHOOK: query: CREATE TABLE T2(key STRING, val STRING) STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@T2 +POSTHOOK: query: CREATE TABLE T2(key STRING, val STRING) STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@T2 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2 +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@t2 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2 +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@t2 +PREHOOK:
query: -- copy from skewjoinopt11 +-- test compile time skew join and auto map join +-- This test is to verify the skew join compile optimization when the join is followed +-- by a union. Both sides of a union consist of a join, which should have used +-- skew join compile time optimization. +-- adding an order by at the end to make the results deterministic + +EXPLAIN +select * from +( + select a.key, a.val as val1, b.val as val2 from T1 a join T2 b on a.key = b.key + union all + select a.key, a.val as val1, b.val as val2 from T1 a join T2 b on a.key = b.key +) subq1 +PREHOOK: type: QUERY +POSTHOOK: query: -- copy from skewjoinopt11 +-- test compile time skew join and auto map join +-- This test is to verify the skew join compile optimization when the join is followed +-- by a union. Both sides of a union consist of a join, which should have used +-- skew join compile time optimization. +-- adding an order by at the end to make the results deterministic + +EXPLAIN +select * from +( + select a.key, a.val as val1, b.val as val2 from T1 a join T2 b on a.key = b.key + union all + select a.key, a.val as val1, b.val as val2 from T1 a join T2 b on a.key = b.key +) subq1 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-16 is a root stage + Stage-2 depends on stages: Stage-16 + Stage-3 depends on stages: Stage-2, Stage-8 + Stage-17 is a root stage + Stage-8 depends on stages: Stage-17 + Stage-0 depends on stages: Stage-3 + +STAGE PLANS: + Stage: Stage-16 + Map Reduce Local Work + Alias -> Map Local Tables: + null-subquery2:subq1-subquery2:a + Fetch Operator + limit: -1 + subquery1:a + Fetch Operator + limit: -1 + Alias -> Map Local Operator Tree: + null-subquery2:subq1-subquery2:a + TableScan + alias: a + Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Filter Operator + predicate: (key is not null and (key = '2')) (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + HashTable Sink Operator + condition expressions: + 0 {val} + 1 {val} + keys: + 0 key (type: string) + 1 key (type: string) + subquery1:a + TableScan + alias: a + Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Filter Operator + predicate: (key is not null and (not (key = '2'))) (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + HashTable Sink Operator + condition expressions: + 0 {val} + 1 {val} + keys: + 0 key (type: string) + 1 key (type: string) + + Stage: Stage-2 + Map Reduce + Map Operator Tree: + TableScan + alias: b + Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Filter Operator + predicate: (key is not null and (not (key = '2'))) (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 {key} {val} + 1 {val} + keys: + 0 key (type: string) + 1 key (type: string) + outputColumnNames: _col0, _col1, _col6 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Select Operator + expressions: _col0 (type: string), _col1 (type: string), _col6 (type: string) + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Union + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Select Operator + SELECT * : (no compute) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + File Output Operator + compressed: false 
+ table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe + TableScan + alias: b + Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Filter Operator + predicate: (key is not null and (key = '2')) (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 {key} {val} + 1 {val} + keys: + 0 key (type: string) + 1 key (type: string) + outputColumnNames: _col0, _col1, _col6 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Select Operator + expressions: _col0 (type: string), _col1 (type: string), _col6 (type: string) + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Union + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Select Operator + SELECT * : (no compute) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe + Local Work: + Map Reduce Local Work + + Stage: Stage-3 + Map Reduce + Map Operator Tree: + TableScan + Union + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Select Operator + expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string) + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + TableScan + Union + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Select Operator + expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string) + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-17 + Map Reduce Local Work + Alias -> Map Local Tables: + null-subquery1:subq1-subquery1:a + Fetch Operator + limit: -1 + subquery2:a + Fetch Operator + limit: -1 + Alias -> Map Local Operator Tree: + null-subquery1:subq1-subquery1:a + TableScan + alias: a + Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Filter Operator + predicate: (key is not null and (key = '2')) (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + HashTable Sink Operator + condition expressions: + 0 {val} + 1 {val} + keys: + 0 key (type: string) + 1 key (type: string) + subquery2:a + TableScan + alias: a + Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Filter 
Operator + predicate: (key is not null and (not (key = '2'))) (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + HashTable Sink Operator + condition expressions: + 0 {val} + 1 {val} + keys: + 0 key (type: string) + 1 key (type: string) + + Stage: Stage-8 + Map Reduce + Map Operator Tree: + TableScan + alias: b + Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Filter Operator + predicate: (key is not null and (key = '2')) (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 {key} {val} + 1 {val} + keys: + 0 key (type: string) + 1 key (type: string) + outputColumnNames: _col0, _col1, _col6 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Select Operator + expressions: _col0 (type: string), _col1 (type: string), _col6 (type: string) + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Union + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Select Operator + SELECT * : (no compute) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe + TableScan + alias: b + Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Filter Operator + predicate: (key is not null and (not (key = '2'))) (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 {key} {val} + 1 {val} + keys: + 0 key (type: string) + 1 key (type: string) + outputColumnNames: _col0, _col1, _col6 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Select Operator + expressions: _col0 (type: string), _col1 (type: string), _col6 (type: string) + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Union + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Select Operator + SELECT * : (no compute) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe + Local Work: + Map Reduce Local Work + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select * from +( + select a.key, a.val as val1, b.val as val2 from T1 a join T2 b on a.key = b.key + union all + select a.key, a.val as val1, b.val as val2 from T1 a join T2 b on a.key = b.key +) subq1 +ORDER BY key, val1, val2 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t2 +#### A masked pattern was here #### +POSTHOOK: query: select * from +( + select a.key, a.val as val1, b.val as val2 from T1 a join T2 b on a.key = b.key + union all + select a.key, a.val as val1, b.val as val2 from T1 a join T2 b on a.key = b.key +) subq1 +ORDER BY key, val1, val2 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 
+POSTHOOK: Input: default@t2 +#### A masked pattern was here #### +2 12 22 +2 12 22 +3 13 13 +3 13 13 +8 18 18 +8 18 18 +8 18 18 +8 18 18 +8 28 18 +8 28 18 +8 28 18 +8 28 18 Index: ql/src/test/results/clientpositive/skewjoin_mapjoin8.q.out =================================================================== --- ql/src/test/results/clientpositive/skewjoin_mapjoin8.q.out (revision 0) +++ ql/src/test/results/clientpositive/skewjoin_mapjoin8.q.out (working copy) @@ -0,0 +1,197 @@ +PREHOOK: query: CREATE TABLE T1(key STRING, val STRING) STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@T1 +POSTHOOK: query: CREATE TABLE T1(key STRING, val STRING) STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@T1 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1 +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@t1 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1 +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@t1 +PREHOOK: query: CREATE TABLE T2(key STRING, val STRING) STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@T2 +POSTHOOK: query: CREATE TABLE T2(key STRING, val STRING) STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@T2 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2 +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@t2 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2 +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@t2 +PREHOOK: query: CREATE TABLE T3(key STRING, val STRING) +SKEWED BY (val) ON ((12)) STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@T3 +POSTHOOK: query: CREATE TABLE T3(key STRING, val STRING) +SKEWED BY (val) ON ((12)) STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@T3 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3 +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@t3 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3 +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@t3 +PREHOOK: query: -- copy from skewjoinopt13 +-- test compile time skew join and auto map join +-- This test is for skewed join compile time optimization for more than 2 tables. +-- The join key for table 3 is different from the join key used for joining +-- tables 1 and 2. Table 3 is skewed, but since one of the join sources for table +-- 3 consist of a sub-query which contains a join, the compile time skew join +-- optimization is not performed +-- adding a order by at the end to make the results deterministic + +EXPLAIN +select * +from +T1 a join T2 b on a.key = b.key +join T3 c on a.val = c.val +PREHOOK: type: QUERY +POSTHOOK: query: -- copy from skewjoinopt13 +-- test compile time skew join and auto map join +-- This test is for skewed join compile time optimization for more than 2 tables. +-- The join key for table 3 is different from the join key used for joining +-- tables 1 and 2. 
Table 3 is skewed, but since one of the join sources for table +-- 3 consist of a sub-query which contains a join, the compile time skew join +-- optimization is not performed +-- adding a order by at the end to make the results deterministic + +EXPLAIN +select * +from +T1 a join T2 b on a.key = b.key +join T3 c on a.val = c.val +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-7 is a root stage + Stage-5 depends on stages: Stage-7 + Stage-0 depends on stages: Stage-5 + +STAGE PLANS: + Stage: Stage-7 + Map Reduce Local Work + Alias -> Map Local Tables: + a + Fetch Operator + limit: -1 + c + Fetch Operator + limit: -1 + Alias -> Map Local Operator Tree: + a + TableScan + alias: a + Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Filter Operator + predicate: (key is not null and val is not null) (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + HashTable Sink Operator + condition expressions: + 0 {val} + 1 {key} {val} + keys: + 0 key (type: string) + 1 key (type: string) + c + TableScan + alias: c + Statistics: Num rows: 0 Data size: 20 Basic stats: PARTIAL Column stats: NONE + Filter Operator + predicate: val is not null (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + HashTable Sink Operator + condition expressions: + 0 {_col0} {_col1} {_col5} {_col6} + 1 {key} + keys: + 0 _col1 (type: string) + 1 val (type: string) + + Stage: Stage-5 + Map Reduce + Map Operator Tree: + TableScan + alias: b + Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Filter Operator + predicate: key is not null (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 {key} {val} + 1 {key} {val} + keys: + 0 key (type: string) + 1 key (type: string) + outputColumnNames: _col0, _col1, _col5, _col6 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 {_col0} {_col1} {_col5} {_col6} + 1 {key} {val} + keys: + 0 _col1 (type: string) + 1 val (type: string) + outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Select Operator + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string), _col10 (type: string), _col11 (type: string) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Local Work: + Map Reduce Local Work + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select * +from +T1 a join T2 b on a.key = b.key +join T3 c on a.val = c.val +order by a.key, b.key, c.key, a.val, b.val, c.val +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t3 +#### A masked pattern was here #### +POSTHOOK: query: select * +from +T1 a join T2 b on a.key = b.key +join T3 c on a.val = c.val +order by a.key, b.key, c.key, a.val, b.val, c.val 
+POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t3 +#### A masked pattern was here #### +2 12 2 22 2 12 Index: ql/src/test/results/clientpositive/skewjoin_mapjoin9.q.out =================================================================== --- ql/src/test/results/clientpositive/skewjoin_mapjoin9.q.out (revision 0) +++ ql/src/test/results/clientpositive/skewjoin_mapjoin9.q.out (working copy) @@ -0,0 +1,267 @@ +PREHOOK: query: CREATE TABLE T1(key STRING, val STRING) +SKEWED BY (key) ON ((2)) STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@T1 +POSTHOOK: query: CREATE TABLE T1(key STRING, val STRING) +SKEWED BY (key) ON ((2)) STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@T1 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1 +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@t1 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1 +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@t1 +PREHOOK: query: CREATE TABLE T2(key STRING, val STRING) STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@T2 +POSTHOOK: query: CREATE TABLE T2(key STRING, val STRING) STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@T2 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2 +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@t2 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2 +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@t2 +PREHOOK: query: CREATE TABLE T3(key STRING, val STRING) +SKEWED BY (val) ON ((12)) STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@T3 +POSTHOOK: query: CREATE TABLE T3(key STRING, val STRING) +SKEWED BY (val) ON ((12)) STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@T3 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3 +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@t3 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3 +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@t3 +PREHOOK: query: -- copy from skewjoinopt14 +-- test compile time skew join and auto map join +-- This test is for skewed join compile time optimization for more than 2 tables. +-- The join key for table 3 is different from the join key used for joining +-- tables 1 and 2. Tables 1 and 3 are skewed. Since one of the join sources for table +-- 3 consist of a sub-query which contains a join, the compile time skew join +-- optimization is not enabled for table 3, but it is used for the first join between +-- tables 1 and 2 +-- adding a order by at the end to make the results deterministic + +EXPLAIN +select * +from +T1 a join T2 b on a.key = b.key +join T3 c on a.val = c.val +PREHOOK: type: QUERY +POSTHOOK: query: -- copy from skewjoinopt14 +-- test compile time skew join and auto map join +-- This test is for skewed join compile time optimization for more than 2 tables. 
+-- The join key for table 3 is different from the join key used for joining +-- tables 1 and 2. Tables 1 and 3 are skewed. Since one of the join sources for table +-- 3 consist of a sub-query which contains a join, the compile time skew join +-- optimization is not enabled for table 3, but it is used for the first join between +-- tables 1 and 2 +-- adding a order by at the end to make the results deterministic + +EXPLAIN +select * +from +T1 a join T2 b on a.key = b.key +join T3 c on a.val = c.val +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-10 is a root stage + Stage-7 depends on stages: Stage-10 + Stage-0 depends on stages: Stage-7 + +STAGE PLANS: + Stage: Stage-10 + Map Reduce Local Work + Alias -> Map Local Tables: + a + Fetch Operator + limit: -1 + c + Fetch Operator + limit: -1 + subquery1:a + Fetch Operator + limit: -1 + Alias -> Map Local Operator Tree: + a + TableScan + alias: a + Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Filter Operator + predicate: ((key is not null and val is not null) and (key = '2')) (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + HashTable Sink Operator + condition expressions: + 0 {val} + 1 {key} {val} + keys: + 0 key (type: string) + 1 key (type: string) + c + TableScan + alias: c + Statistics: Num rows: 0 Data size: 20 Basic stats: PARTIAL Column stats: NONE + Filter Operator + predicate: val is not null (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + HashTable Sink Operator + condition expressions: + 0 {_col0} {_col1} {_col5} {_col6} + 1 {key} + keys: + 0 _col1 (type: string) + 1 val (type: string) + subquery1:a + TableScan + alias: a + Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Filter Operator + predicate: ((key is not null and val is not null) and (not (key = '2'))) (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + HashTable Sink Operator + condition expressions: + 0 {val} + 1 {key} {val} + keys: + 0 key (type: string) + 1 key (type: string) + + Stage: Stage-7 + Map Reduce + Map Operator Tree: + TableScan + alias: b + Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Filter Operator + predicate: (key is not null and (not (key = '2'))) (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 {key} {val} + 1 {key} {val} + keys: + 0 key (type: string) + 1 key (type: string) + outputColumnNames: _col0, _col1, _col5, _col6 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Union + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Select Operator + SELECT * : (no compute) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 {_col0} {_col1} {_col5} {_col6} + 1 {key} {val} + keys: + 0 _col1 (type: string) + 1 val (type: string) + outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Select Operator + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string), _col10 (type: string), _col11 (type: string) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 + Statistics: Num rows: 0 Data size: 0 Basic 
stats: NONE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + TableScan + alias: b + Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Filter Operator + predicate: (key is not null and (key = '2')) (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 {key} {val} + 1 {key} {val} + keys: + 0 key (type: string) + 1 key (type: string) + outputColumnNames: _col0, _col1, _col5, _col6 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Union + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Select Operator + SELECT * : (no compute) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 {_col0} {_col1} {_col5} {_col6} + 1 {key} {val} + keys: + 0 _col1 (type: string) + 1 val (type: string) + outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Select Operator + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string), _col10 (type: string), _col11 (type: string) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Local Work: + Map Reduce Local Work + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select * +from +T1 a join T2 b on a.key = b.key +join T3 c on a.val = c.val +order by a.key, b.key, a.val, b.val +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t3 +#### A masked pattern was here #### +POSTHOOK: query: select * +from +T1 a join T2 b on a.key = b.key +join T3 c on a.val = c.val +order by a.key, b.key, a.val, b.val +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t3 +#### A masked pattern was here #### +2 12 2 22 2 12 Index: ql/src/test/results/clientpositive/subquery_in.q.out =================================================================== --- ql/src/test/results/clientpositive/subquery_in.q.out (revision 1637277) +++ ql/src/test/results/clientpositive/subquery_in.q.out (working copy) @@ -1,110 +1,14 @@ PREHOOK: query: -- SORT_QUERY_RESULTS -DROP TABLE part -PREHOOK: type: DROPTABLE -POSTHOOK: query: -- SORT_QUERY_RESULTS - -DROP TABLE part -POSTHOOK: type: DROPTABLE -PREHOOK: query: -- data setup -CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@part -POSTHOOK: query: -- data setup 
-CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@part -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part -PREHOOK: type: LOAD -#### A masked pattern was here #### -PREHOOK: Output: default@part -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part -POSTHOOK: type: LOAD -#### A masked pattern was here #### -POSTHOOK: Output: default@part -PREHOOK: query: DROP TABLE lineitem -PREHOOK: type: DROPTABLE -POSTHOOK: query: DROP TABLE lineitem -POSTHOOK: type: DROPTABLE -PREHOOK: query: CREATE TABLE lineitem (L_ORDERKEY INT, - L_PARTKEY INT, - L_SUPPKEY INT, - L_LINENUMBER INT, - L_QUANTITY DOUBLE, - L_EXTENDEDPRICE DOUBLE, - L_DISCOUNT DOUBLE, - L_TAX DOUBLE, - L_RETURNFLAG STRING, - L_LINESTATUS STRING, - l_shipdate STRING, - L_COMMITDATE STRING, - L_RECEIPTDATE STRING, - L_SHIPINSTRUCT STRING, - L_SHIPMODE STRING, - L_COMMENT STRING) -ROW FORMAT DELIMITED -FIELDS TERMINATED BY '|' -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@lineitem -POSTHOOK: query: CREATE TABLE lineitem (L_ORDERKEY INT, - L_PARTKEY INT, - L_SUPPKEY INT, - L_LINENUMBER INT, - L_QUANTITY DOUBLE, - L_EXTENDEDPRICE DOUBLE, - L_DISCOUNT DOUBLE, - L_TAX DOUBLE, - L_RETURNFLAG STRING, - L_LINESTATUS STRING, - l_shipdate STRING, - L_COMMITDATE STRING, - L_RECEIPTDATE STRING, - L_SHIPINSTRUCT STRING, - L_SHIPMODE STRING, - L_COMMENT STRING) -ROW FORMAT DELIMITED -FIELDS TERMINATED BY '|' -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@lineitem -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/lineitem.txt' OVERWRITE INTO TABLE lineitem -PREHOOK: type: LOAD -#### A masked pattern was here #### -PREHOOK: Output: default@lineitem -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/lineitem.txt' OVERWRITE INTO TABLE lineitem -POSTHOOK: type: LOAD -#### A masked pattern was here #### -POSTHOOK: Output: default@lineitem -PREHOOK: query: -- non agg, non corr +-- non agg, non corr explain select * from src where src.key in (select key from src s1 where s1.key > '9') PREHOOK: type: QUERY -POSTHOOK: query: -- non agg, non corr +POSTHOOK: query: -- SORT_QUERY_RESULTS + +-- non agg, non corr explain select * from src @@ -349,25 +253,25 @@ Map Operator Tree: TableScan alias: part - Statistics: Num rows: 30 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: p_mfgr (type: string), p_size (type: int) sort order: ++ Map-reduce partition columns: p_mfgr (type: string) - Statistics: Num rows: 30 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE value expressions: p_mfgr (type: string), p_size (type: int) Reduce Operator Tree: Extract - Statistics: Num rows: 30 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE PTF Operator - Statistics: Num rows: 30 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: (_wcol0 <= 2) 
(type: boolean) - Statistics: Num rows: 10 Data size: 1057 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 8 Data size: 968 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col5 (type: int) outputColumnNames: _col0 - Statistics: Num rows: 10 Data size: 1057 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 8 Data size: 968 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: avg(_col0) mode: hash @@ -424,15 +328,15 @@ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE TableScan alias: part - Statistics: Num rows: 30 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: UDFToDouble(p_size) is not null (type: boolean) - Statistics: Num rows: 15 Data size: 1586 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: UDFToDouble(p_size) (type: double) sort order: + Map-reduce partition columns: UDFToDouble(p_size) (type: double) - Statistics: Num rows: 15 Data size: 1586 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE value expressions: p_name (type: string), p_size (type: int) Reduce Operator Tree: Join Operator @@ -442,14 +346,14 @@ 0 {VALUE._col1} {VALUE._col5} 1 outputColumnNames: _col1, _col5 - Statistics: Num rows: 16 Data size: 1744 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col1 (type: string), _col5 (type: int) outputColumnNames: _col0, _col1 - Statistics: Num rows: 16 Data size: 1744 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 16 Data size: 1744 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -513,31 +417,31 @@ Map Operator Tree: TableScan alias: part - Statistics: Num rows: 30 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: p_mfgr (type: string), p_size (type: int) sort order: ++ Map-reduce partition columns: p_mfgr (type: string) - Statistics: Num rows: 30 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE value expressions: p_mfgr (type: string), p_size (type: int) Reduce Operator Tree: Extract - Statistics: Num rows: 30 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE PTF Operator - Statistics: Num rows: 30 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: ((_wcol0 <= 2) and _col2 is not null) (type: boolean) - Statistics: Num rows: 5 Data size: 528 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 4 Data size: 484 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: 
_col2 (type: string), _col5 (type: int) outputColumnNames: _col0, _col1 - Statistics: Num rows: 5 Data size: 528 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 4 Data size: 484 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: min(_col1) keys: _col0 (type: string) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 5 Data size: 528 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 4 Data size: 484 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false table: @@ -553,7 +457,7 @@ key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 5 Data size: 528 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 4 Data size: 484 Basic stats: COMPLETE Column stats: NONE value expressions: _col1 (type: int) Reduce Operator Tree: Group By Operator @@ -561,19 +465,19 @@ keys: KEY._col0 (type: string) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 2 Data size: 211 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 242 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: _col1 is not null (type: boolean) - Statistics: Num rows: 2 Data size: 211 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 242 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col1 (type: int), _col0 (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 2 Data size: 211 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 242 Basic stats: COMPLETE Column stats: NONE Group By Operator keys: _col0 (type: int), _col1 (type: string) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 2 Data size: 211 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 242 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false table: @@ -586,22 +490,22 @@ Map Operator Tree: TableScan alias: b - Statistics: Num rows: 15 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: (p_size is not null and p_mfgr is not null) (type: boolean) - Statistics: Num rows: 4 Data size: 846 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: p_size (type: int), p_mfgr (type: string) sort order: ++ Map-reduce partition columns: p_size (type: int), p_mfgr (type: string) - Statistics: Num rows: 4 Data size: 846 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE value expressions: p_name (type: string) TableScan Reduce Output Operator key expressions: _col0 (type: int), _col1 (type: string) sort order: ++ Map-reduce partition columns: _col0 (type: int), _col1 (type: string) - Statistics: Num rows: 2 Data size: 211 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 242 Basic stats: COMPLETE Column stats: NONE Reduce Operator Tree: Join Operator condition map: @@ -610,14 +514,14 @@ 0 {VALUE._col1} {KEY.reducesinkkey1} {KEY.reducesinkkey0} 1 outputColumnNames: _col1, _col2, _col5 - Statistics: Num rows: 4 Data size: 930 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 7 Data size: 931 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: 
_col2 (type: string), _col1 (type: string), _col5 (type: int) outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 4 Data size: 930 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 7 Data size: 931 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 4 Data size: 930 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 7 Data size: 931 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -850,34 +754,34 @@ Map Operator Tree: TableScan alias: lineitem - Statistics: Num rows: 3024 Data size: 12099 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 100 Data size: 11999 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: l_partkey is not null (type: boolean) - Statistics: Num rows: 1512 Data size: 6049 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 50 Data size: 5999 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: l_partkey (type: int) outputColumnNames: l_partkey - Statistics: Num rows: 1512 Data size: 6049 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 50 Data size: 5999 Basic stats: COMPLETE Column stats: NONE Group By Operator keys: l_partkey (type: int) mode: hash outputColumnNames: _col0 - Statistics: Num rows: 1512 Data size: 6049 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 50 Data size: 5999 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: int) sort order: + Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 1512 Data size: 6049 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 50 Data size: 5999 Basic stats: COMPLETE Column stats: NONE Reduce Operator Tree: Group By Operator keys: KEY._col0 (type: int) mode: mergepartial outputColumnNames: _col0 - Statistics: Num rows: 756 Data size: 3024 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 25 Data size: 2999 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: int) outputColumnNames: _col0 - Statistics: Num rows: 756 Data size: 3024 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 25 Data size: 2999 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false table: @@ -890,22 +794,22 @@ Map Operator Tree: TableScan alias: li - Statistics: Num rows: 756 Data size: 12099 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 100 Data size: 11999 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: ((l_partkey is not null and l_orderkey is not null) and (l_linenumber = 1)) (type: boolean) - Statistics: Num rows: 94 Data size: 1504 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 12 Data size: 1439 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: l_partkey (type: int) sort order: + Map-reduce partition columns: l_partkey (type: int) - Statistics: Num rows: 94 Data size: 1504 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 12 Data size: 1439 Basic stats: COMPLETE Column stats: NONE value expressions: l_orderkey (type: int), l_suppkey (type: int) TableScan Reduce Output Operator key expressions: _col0 (type: int) sort order: + Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 756 Data size: 3024 Basic stats: COMPLETE Column stats: NONE + Statistics: Num 
rows: 25 Data size: 2999 Basic stats: COMPLETE Column stats: NONE Reduce Operator Tree: Join Operator condition map: @@ -914,7 +818,7 @@ 0 {KEY.reducesinkkey0} 1 {VALUE._col0} {VALUE._col1} outputColumnNames: _col0, _col1, _col3 - Statistics: Num rows: 831 Data size: 3326 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 27 Data size: 3298 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false table: @@ -930,28 +834,28 @@ key expressions: _col1 (type: int) sort order: + Map-reduce partition columns: _col1 (type: int) - Statistics: Num rows: 831 Data size: 3326 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 27 Data size: 3298 Basic stats: COMPLETE Column stats: NONE value expressions: _col0 (type: int), _col3 (type: int) TableScan alias: lineitem - Statistics: Num rows: 1728 Data size: 12099 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 100 Data size: 11999 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: ((l_shipmode = 'AIR') and l_orderkey is not null) (type: boolean) - Statistics: Num rows: 432 Data size: 3024 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 25 Data size: 2999 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: l_orderkey (type: int) outputColumnNames: _col0 - Statistics: Num rows: 432 Data size: 3024 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 25 Data size: 2999 Basic stats: COMPLETE Column stats: NONE Group By Operator keys: _col0 (type: int) mode: hash outputColumnNames: _col0 - Statistics: Num rows: 432 Data size: 3024 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 25 Data size: 2999 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: int) sort order: + Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 432 Data size: 3024 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 25 Data size: 2999 Basic stats: COMPLETE Column stats: NONE Reduce Operator Tree: Join Operator condition map: @@ -960,14 +864,14 @@ 0 {VALUE._col0} {VALUE._col2} 1 outputColumnNames: _col0, _col3 - Statistics: Num rows: 914 Data size: 3658 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 29 Data size: 3627 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: int), _col3 (type: int) outputColumnNames: _col0, _col1 - Statistics: Num rows: 914 Data size: 3658 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 29 Data size: 3627 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 914 Data size: 3658 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 29 Data size: 3627 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat Index: ql/src/test/results/clientpositive/subquery_in_explain_rewrite.q.out =================================================================== --- ql/src/test/results/clientpositive/subquery_in_explain_rewrite.q.out (revision 1637277) +++ ql/src/test/results/clientpositive/subquery_in_explain_rewrite.q.out (working copy) @@ -1,83 +1,3 @@ -PREHOOK: query: DROP TABLE part -PREHOOK: type: DROPTABLE -POSTHOOK: query: DROP TABLE part -POSTHOOK: type: DROPTABLE -PREHOOK: query: -- data setup -CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size 
INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@part -POSTHOOK: query: -- data setup -CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@part -PREHOOK: query: DROP TABLE lineitem -PREHOOK: type: DROPTABLE -POSTHOOK: query: DROP TABLE lineitem -POSTHOOK: type: DROPTABLE -PREHOOK: query: CREATE TABLE lineitem (L_ORDERKEY INT, - L_PARTKEY INT, - L_SUPPKEY INT, - L_LINENUMBER INT, - L_QUANTITY DOUBLE, - L_EXTENDEDPRICE DOUBLE, - L_DISCOUNT DOUBLE, - L_TAX DOUBLE, - L_RETURNFLAG STRING, - L_LINESTATUS STRING, - l_shipdate STRING, - L_COMMITDATE STRING, - L_RECEIPTDATE STRING, - L_SHIPINSTRUCT STRING, - L_SHIPMODE STRING, - L_COMMENT STRING) -ROW FORMAT DELIMITED -FIELDS TERMINATED BY '|' -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@lineitem -POSTHOOK: query: CREATE TABLE lineitem (L_ORDERKEY INT, - L_PARTKEY INT, - L_SUPPKEY INT, - L_LINENUMBER INT, - L_QUANTITY DOUBLE, - L_EXTENDEDPRICE DOUBLE, - L_DISCOUNT DOUBLE, - L_TAX DOUBLE, - L_RETURNFLAG STRING, - L_LINESTATUS STRING, - l_shipdate STRING, - L_COMMITDATE STRING, - L_RECEIPTDATE STRING, - L_SHIPINSTRUCT STRING, - L_SHIPMODE STRING, - L_COMMENT STRING) -ROW FORMAT DELIMITED -FIELDS TERMINATED BY '|' -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@lineitem PREHOOK: query: -- non agg, non corr explain rewrite select * Index: ql/src/test/results/clientpositive/subquery_in_having.q.out =================================================================== --- ql/src/test/results/clientpositive/subquery_in_having.q.out (revision 1637277) +++ ql/src/test/results/clientpositive/subquery_in_having.q.out (working copy) @@ -1,7 +1,14 @@ PREHOOK: query: -- SORT_QUERY_RESULTS -- data setup -CREATE TABLE part( +DROP TABLE IF EXISTS part_subq +PREHOOK: type: DROPTABLE +POSTHOOK: query: -- SORT_QUERY_RESULTS + +-- data setup +DROP TABLE IF EXISTS part_subq +POSTHOOK: type: DROPTABLE +PREHOOK: query: CREATE TABLE part_subq( p_partkey INT, p_name STRING, p_mfgr STRING, @@ -14,11 +21,8 @@ ) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@part -POSTHOOK: query: -- SORT_QUERY_RESULTS - --- data setup -CREATE TABLE part( +PREHOOK: Output: default@part_subq +POSTHOOK: query: CREATE TABLE part_subq( p_partkey INT, p_name STRING, p_mfgr STRING, @@ -31,15 +35,15 @@ ) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@part -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part +POSTHOOK: Output: default@part_subq +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part_subq PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@part -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part +PREHOOK: Output: default@part_subq +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part_subq POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@part +POSTHOOK: Output: default@part_subq PREHOOK: query: -- non agg, non corr explain select key, count(*) 
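subquery_in_having.q.out takes a different route from the files above: rather than dropping its private setup, it renames the working table to part_subq, guarded by DROP TABLE IF EXISTS, so the test stops clobbering the shared part table. The agg, non-corr case that the renamed hunks below re-baseline is the HAVING ... IN pattern, quoted here from the diff for orientation:

-- the agg, non-corr case, now running against part_subq (copied from the hunks below)
select p_mfgr, avg(p_size)
from part_subq b
group by b.p_mfgr
having b.p_mfgr in
  (select p_mfgr
   from part_subq
   group by p_mfgr
   having max(p_size) - min(p_size) < 20);

The plan-side churn in these hunks (the max/min branch and the avg branch trading places between stages, and the aliases b and part_subq swapping) reads as a stage reordering rather than a semantic change: both aggregations are still computed and joined on p_mfgr.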
@@ -413,11 +417,11 @@ PREHOOK: query: -- agg, non corr explain select p_mfgr, avg(p_size) -from part b +from part_subq b group by b.p_mfgr having b.p_mfgr in (select p_mfgr - from part + from part_subq group by p_mfgr having max(p_size) - min(p_size) < 20 ) @@ -425,11 +429,11 @@ POSTHOOK: query: -- agg, non corr explain select p_mfgr, avg(p_size) -from part b +from part_subq b group by b.p_mfgr having b.p_mfgr in (select p_mfgr - from part + from part_subq group by p_mfgr having max(p_size) - min(p_size) < 20 ) @@ -445,7 +449,7 @@ Map Reduce Map Operator Tree: TableScan - alias: b + alias: part_subq Statistics: Num rows: 30 Data size: 3173 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: p_mfgr is not null (type: boolean) @@ -455,30 +459,42 @@ outputColumnNames: p_mfgr, p_size Statistics: Num rows: 15 Data size: 1586 Basic stats: COMPLETE Column stats: NONE Group By Operator - aggregations: avg(p_size) + aggregations: max(p_size), min(p_size) keys: p_mfgr (type: string) mode: hash - outputColumnNames: _col0, _col1 + outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 15 Data size: 1586 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 15 Data size: 1586 Basic stats: COMPLETE Column stats: NONE - value expressions: _col1 (type: struct) + value expressions: _col1 (type: int), _col2 (type: int) Reduce Operator Tree: Group By Operator - aggregations: avg(VALUE._col0) + aggregations: max(VALUE._col0), min(VALUE._col1) keys: KEY._col0 (type: string) mode: mergepartial - outputColumnNames: _col0, _col1 - Statistics: Num rows: 15 Data size: 1586 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 7 Data size: 740 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: ((_col1 - _col2) < 20) (type: boolean) + Statistics: Num rows: 2 Data size: 211 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 2 Data size: 211 Basic stats: COMPLETE Column stats: NONE + Group By Operator + keys: _col0 (type: string) + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 2 Data size: 211 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe Stage: Stage-2 Map Reduce @@ -488,14 +504,14 @@ key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 15 Data size: 1586 Basic stats: COMPLETE Column stats: NONE - value expressions: _col1 (type: double) + Statistics: Num rows: 2 Data size: 211 Basic stats: COMPLETE Column stats: NONE TableScan Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 2 Data size: 211 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 15 Data size: 1586 Basic stats: COMPLETE Column stats: 
NONE + value expressions: _col1 (type: double) Reduce Operator Tree: Join Operator condition map: @@ -521,7 +537,7 @@ Map Reduce Map Operator Tree: TableScan - alias: part + alias: b Statistics: Num rows: 30 Data size: 3173 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: p_mfgr is not null (type: boolean) @@ -531,42 +547,30 @@ outputColumnNames: p_mfgr, p_size Statistics: Num rows: 15 Data size: 1586 Basic stats: COMPLETE Column stats: NONE Group By Operator - aggregations: max(p_size), min(p_size) + aggregations: avg(p_size) keys: p_mfgr (type: string) mode: hash - outputColumnNames: _col0, _col1, _col2 + outputColumnNames: _col0, _col1 Statistics: Num rows: 15 Data size: 1586 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 15 Data size: 1586 Basic stats: COMPLETE Column stats: NONE - value expressions: _col1 (type: int), _col2 (type: int) + value expressions: _col1 (type: struct) Reduce Operator Tree: Group By Operator - aggregations: max(VALUE._col0), min(VALUE._col1) + aggregations: avg(VALUE._col0) keys: KEY._col0 (type: string) mode: mergepartial - outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 7 Data size: 740 Basic stats: COMPLETE Column stats: NONE - Filter Operator - predicate: ((_col1 - _col2) < 20) (type: boolean) - Statistics: Num rows: 2 Data size: 211 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: _col0 (type: string) - outputColumnNames: _col0 - Statistics: Num rows: 2 Data size: 211 Basic stats: COMPLETE Column stats: NONE - Group By Operator - keys: _col0 (type: string) - mode: hash - outputColumnNames: _col0 - Statistics: Num rows: 2 Data size: 211 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe + outputColumnNames: _col0, _col1 + Statistics: Num rows: 15 Data size: 1586 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe Stage: Stage-0 Fetch Operator @@ -577,11 +581,11 @@ PREHOOK: query: -- agg, non corr explain select p_mfgr, avg(p_size) -from part b +from part_subq b group by b.p_mfgr having b.p_mfgr in (select p_mfgr - from part + from part_subq group by p_mfgr having max(p_size) - min(p_size) < 20 ) @@ -589,11 +593,11 @@ POSTHOOK: query: -- agg, non corr explain select p_mfgr, avg(p_size) -from part b +from part_subq b group by b.p_mfgr having b.p_mfgr in (select p_mfgr - from part + from part_subq group by p_mfgr having max(p_size) - min(p_size) < 20 ) @@ -607,7 +611,7 @@ Map Reduce Map Operator Tree: TableScan - alias: b + alias: part_subq Statistics: Num rows: 30 Data size: 3173 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: p_mfgr is not null (type: boolean) @@ -617,19 +621,19 @@ outputColumnNames: p_mfgr, p_size Statistics: Num rows: 15 Data size: 1586 Basic stats: COMPLETE Column stats: NONE Group By Operator - aggregations: avg(p_size) + aggregations: max(p_size), min(p_size) keys: p_mfgr (type: string) mode: hash - outputColumnNames: _col0, _col1 + 
outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 15 Data size: 1586 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 15 Data size: 1586 Basic stats: COMPLETE Column stats: NONE - value expressions: _col1 (type: struct) + value expressions: _col1 (type: int), _col2 (type: int) TableScan - alias: part + alias: b Statistics: Num rows: 30 Data size: 3173 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: p_mfgr is not null (type: boolean) @@ -639,17 +643,17 @@ outputColumnNames: p_mfgr, p_size Statistics: Num rows: 15 Data size: 1586 Basic stats: COMPLETE Column stats: NONE Group By Operator - aggregations: max(p_size), min(p_size) + aggregations: avg(p_size) keys: p_mfgr (type: string) mode: hash - outputColumnNames: _col0, _col1, _col2 + outputColumnNames: _col0, _col1 Statistics: Num rows: 15 Data size: 1586 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 15 Data size: 1586 Basic stats: COMPLETE Column stats: NONE - value expressions: _col1 (type: int), _col2 (type: int) + value expressions: _col1 (type: struct) Reduce Operator Tree: Demux Operator Statistics: Num rows: 30 Data size: 3172 Basic stats: COMPLETE Column stats: NONE @@ -1233,18 +1237,18 @@ PREHOOK: query: -- non agg, non corr, windowing explain select p_mfgr, p_name, avg(p_size) -from part +from part_subq group by p_mfgr, p_name having p_name in - (select first_value(p_name) over(partition by p_mfgr order by p_size) from part) + (select first_value(p_name) over(partition by p_mfgr order by p_size) from part_subq) PREHOOK: type: QUERY POSTHOOK: query: -- non agg, non corr, windowing explain select p_mfgr, p_name, avg(p_size) -from part +from part_subq group by p_mfgr, p_name having p_name in - (select first_value(p_name) over(partition by p_mfgr order by p_size) from part) + (select first_value(p_name) over(partition by p_mfgr order by p_size) from part_subq) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -1260,7 +1264,7 @@ Map Reduce Map Operator Tree: TableScan - alias: part + alias: part_subq Statistics: Num rows: 15 Data size: 3173 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: p_mfgr (type: string), p_size (type: int) @@ -1379,7 +1383,7 @@ Map Reduce Map Operator Tree: TableScan - alias: part + alias: part_subq Statistics: Num rows: 15 Data size: 3173 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: p_name is not null (type: boolean) @@ -1420,3 +1424,11 @@ Processor Tree: ListSink +PREHOOK: query: DROP TABLE part_subq +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@part_subq +PREHOOK: Output: default@part_subq +POSTHOOK: query: DROP TABLE part_subq +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@part_subq +POSTHOOK: Output: default@part_subq Index: ql/src/test/results/clientpositive/subquery_notin.q.out =================================================================== --- ql/src/test/results/clientpositive/subquery_notin.q.out (revision 1637277) +++ ql/src/test/results/clientpositive/subquery_notin.q.out (working copy) @@ -1,99 +1,3 @@ -PREHOOK: query: DROP TABLE part -PREHOOK: type: DROPTABLE -POSTHOOK: query: DROP TABLE part -POSTHOOK: type: DROPTABLE -PREHOOK: query: -- data setup -CREATE TABLE part( - p_partkey INT, - p_name STRING, 
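The removed block continues below with the rest of the part and lineitem setup, after which subquery_notin.q.out reaches its first plan; note the warning there, "Shuffle Join JOIN[18][tables = [src, sq_1_notin_nullcheck]] ... is a cross product". That warning is expected for NOT IN: since x NOT IN (subquery) yields no rows at all once the subquery emits a NULL, the planner pairs the anti-join with a guard branch that counts NULLs from the subquery and cross-joins its one-row result back in. A hand-written sketch of the shape of that rewrite (illustrative only; T, S, and key are placeholders, not the literal operator tree):

-- illustrative shape of the NOT IN handling; T, S, and key are placeholders
SELECT T.*
FROM T
CROSS JOIN (SELECT count(*) AS null_cnt
            FROM S WHERE S.key IS NULL) nc   -- the nullcheck guard branch
LEFT OUTER JOIN (SELECT key FROM S) sq
  ON T.key = sq.key                          -- anti-join probe
WHERE nc.null_cnt = 0                        -- any NULL in S empties the result
  AND sq.key IS NULL                         -- keep only unmatched rows
  AND T.key IS NOT NULL;                     -- a NULL left key never passes NOT IN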
- p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@part -POSTHOOK: query: -- data setup -CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@part -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part -PREHOOK: type: LOAD -#### A masked pattern was here #### -PREHOOK: Output: default@part -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part -POSTHOOK: type: LOAD -#### A masked pattern was here #### -POSTHOOK: Output: default@part -PREHOOK: query: DROP TABLE lineitem -PREHOOK: type: DROPTABLE -POSTHOOK: query: DROP TABLE lineitem -POSTHOOK: type: DROPTABLE -PREHOOK: query: CREATE TABLE lineitem (L_ORDERKEY INT, - L_PARTKEY INT, - L_SUPPKEY INT, - L_LINENUMBER INT, - L_QUANTITY DOUBLE, - L_EXTENDEDPRICE DOUBLE, - L_DISCOUNT DOUBLE, - L_TAX DOUBLE, - L_RETURNFLAG STRING, - L_LINESTATUS STRING, - l_shipdate STRING, - L_COMMITDATE STRING, - L_RECEIPTDATE STRING, - L_SHIPINSTRUCT STRING, - L_SHIPMODE STRING, - L_COMMENT STRING) -ROW FORMAT DELIMITED -FIELDS TERMINATED BY '|' -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@lineitem -POSTHOOK: query: CREATE TABLE lineitem (L_ORDERKEY INT, - L_PARTKEY INT, - L_SUPPKEY INT, - L_LINENUMBER INT, - L_QUANTITY DOUBLE, - L_EXTENDEDPRICE DOUBLE, - L_DISCOUNT DOUBLE, - L_TAX DOUBLE, - L_RETURNFLAG STRING, - L_LINESTATUS STRING, - l_shipdate STRING, - L_COMMITDATE STRING, - L_RECEIPTDATE STRING, - L_SHIPINSTRUCT STRING, - L_SHIPMODE STRING, - L_COMMENT STRING) -ROW FORMAT DELIMITED -FIELDS TERMINATED BY '|' -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@lineitem -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/lineitem.txt' OVERWRITE INTO TABLE lineitem -PREHOOK: type: LOAD -#### A masked pattern was here #### -PREHOOK: Output: default@lineitem -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/lineitem.txt' OVERWRITE INTO TABLE lineitem -POSTHOOK: type: LOAD -#### A masked pattern was here #### -POSTHOOK: Output: default@lineitem Warning: Shuffle Join JOIN[18][tables = [src, sq_1_notin_nullcheck]] in Stage 'Stage-2:MAPRED' is a cross product PREHOOK: query: -- non agg, non corr explain @@ -419,25 +323,25 @@ Map Operator Tree: TableScan alias: part - Statistics: Num rows: 15 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: p_mfgr (type: string), p_size (type: int) sort order: ++ Map-reduce partition columns: p_mfgr (type: string) - Statistics: Num rows: 15 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE value expressions: p_name (type: string), p_mfgr (type: string), p_size (type: int) Reduce Operator Tree: Extract - Statistics: Num rows: 15 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE PTF Operator - Statistics: Num rows: 15 Data size: 3173 Basic stats: 
COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: (_wcol0 <= 2) (type: boolean) - Statistics: Num rows: 5 Data size: 1057 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 8 Data size: 968 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col1 (type: string), _col2 (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 5 Data size: 1057 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 8 Data size: 968 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false table: @@ -453,14 +357,14 @@ key expressions: _col1 (type: string), _col2 (type: string) sort order: ++ Map-reduce partition columns: _col1 (type: string), _col2 (type: string) - Statistics: Num rows: 16 Data size: 3490 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 28 Data size: 3461 Basic stats: COMPLETE Column stats: NONE value expressions: _col5 (type: int) TableScan Reduce Output Operator key expressions: _col0 (type: string), _col1 (type: string) sort order: ++ Map-reduce partition columns: _col0 (type: string), _col1 (type: string) - Statistics: Num rows: 5 Data size: 1057 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 8 Data size: 968 Basic stats: COMPLETE Column stats: NONE Reduce Operator Tree: Join Operator condition map: @@ -469,17 +373,17 @@ 0 {KEY.reducesinkkey0} {KEY.reducesinkkey1} {VALUE._col3} 1 {KEY.reducesinkkey0} outputColumnNames: _col1, _col2, _col5, _col12 - Statistics: Num rows: 17 Data size: 3839 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 30 Data size: 3807 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: _col12 is null (type: boolean) - Statistics: Num rows: 8 Data size: 1806 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 15 Data size: 1903 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col2 (type: string), _col1 (type: string), _col5 (type: int) outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 8 Data size: 1806 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 15 Data size: 1903 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 8 Data size: 1806 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 15 Data size: 1903 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -490,23 +394,23 @@ Map Operator Tree: TableScan alias: part - Statistics: Num rows: 15 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: p_mfgr (type: string), p_size (type: int) sort order: ++ Map-reduce partition columns: p_mfgr (type: string) - Statistics: Num rows: 15 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE value expressions: p_name (type: string), p_mfgr (type: string), p_size (type: int) Reduce Operator Tree: Extract - Statistics: Num rows: 15 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE PTF Operator - Statistics: Num rows: 15 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 
26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: ((_wcol0 <= 2) and (_col1 is null or _col2 is null)) (type: boolean) - Statistics: Num rows: 4 Data size: 846 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 8 Data size: 968 Basic stats: COMPLETE Column stats: NONE Select Operator - Statistics: Num rows: 4 Data size: 846 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 8 Data size: 968 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: count() mode: hash @@ -557,10 +461,10 @@ Map Operator Tree: TableScan alias: b - Statistics: Num rows: 15 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator sort order: - Statistics: Num rows: 15 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE value expressions: p_name (type: string), p_mfgr (type: string), p_size (type: int) TableScan Reduce Output Operator @@ -574,7 +478,7 @@ 0 {VALUE._col1} {VALUE._col2} {VALUE._col5} 1 outputColumnNames: _col1, _col2, _col5 - Statistics: Num rows: 16 Data size: 3490 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 28 Data size: 3461 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false table: @@ -663,25 +567,25 @@ Map Operator Tree: TableScan alias: part - Statistics: Num rows: 30 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: p_mfgr (type: string), p_size (type: int) sort order: ++ Map-reduce partition columns: p_mfgr (type: string) - Statistics: Num rows: 30 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE value expressions: p_mfgr (type: string), p_size (type: int) Reduce Operator Tree: Extract - Statistics: Num rows: 30 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE PTF Operator - Statistics: Num rows: 30 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: (_wcol0 <= 2) (type: boolean) - Statistics: Num rows: 10 Data size: 1057 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 8 Data size: 968 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col5 (type: int) outputColumnNames: _col0 - Statistics: Num rows: 10 Data size: 1057 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 8 Data size: 968 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: avg(_col0) mode: hash @@ -733,7 +637,7 @@ key expressions: UDFToDouble(_col5) (type: double) sort order: + Map-reduce partition columns: UDFToDouble(_col5) (type: double) - Statistics: Num rows: 33 Data size: 3490 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 28 Data size: 3461 Basic stats: COMPLETE Column stats: NONE value expressions: _col1 (type: string), _col5 (type: int) Reduce Operator Tree: Join Operator @@ -743,17 +647,17 @@ 0 {VALUE._col1} {VALUE._col5} 1 {KEY.reducesinkkey0} outputColumnNames: _col1, _col5, _col12 - Statistics: Num rows: 36 Data size: 3839 Basic stats: COMPLETE Column stats: NONE + Statistics: Num 
rows: 30 Data size: 3807 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: _col12 is null (type: boolean) - Statistics: Num rows: 18 Data size: 1919 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 15 Data size: 1903 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col1 (type: string), _col5 (type: int) outputColumnNames: _col0, _col1 - Statistics: Num rows: 18 Data size: 1919 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 15 Data size: 1903 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 18 Data size: 1919 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 15 Data size: 1903 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -764,25 +668,25 @@ Map Operator Tree: TableScan alias: part - Statistics: Num rows: 30 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: p_mfgr (type: string), p_size (type: int) sort order: ++ Map-reduce partition columns: p_mfgr (type: string) - Statistics: Num rows: 30 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE value expressions: p_mfgr (type: string), p_size (type: int) Reduce Operator Tree: Extract - Statistics: Num rows: 30 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE PTF Operator - Statistics: Num rows: 30 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: (_wcol0 <= 2) (type: boolean) - Statistics: Num rows: 10 Data size: 1057 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 8 Data size: 968 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col5 (type: int) outputColumnNames: _col0 - Statistics: Num rows: 10 Data size: 1057 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 8 Data size: 968 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: avg(_col0) mode: hash @@ -847,10 +751,10 @@ Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE TableScan alias: part - Statistics: Num rows: 30 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator sort order: - Statistics: Num rows: 30 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE value expressions: p_name (type: string), p_size (type: int) Reduce Operator Tree: Join Operator @@ -860,7 +764,7 @@ 0 {VALUE._col1} {VALUE._col5} 1 outputColumnNames: _col1, _col5 - Statistics: Num rows: 33 Data size: 3490 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 28 Data size: 3461 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false table: @@ -956,31 +860,31 @@ Map Operator Tree: TableScan alias: part - Statistics: Num rows: 30 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key 
expressions: p_mfgr (type: string), p_size (type: int) sort order: ++ Map-reduce partition columns: p_mfgr (type: string) - Statistics: Num rows: 30 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE value expressions: p_mfgr (type: string), p_size (type: int) Reduce Operator Tree: Extract - Statistics: Num rows: 30 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE PTF Operator - Statistics: Num rows: 30 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: (_wcol0 <= 2) (type: boolean) - Statistics: Num rows: 10 Data size: 1057 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 8 Data size: 968 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col2 (type: string), _col5 (type: int) outputColumnNames: _col0, _col1 - Statistics: Num rows: 10 Data size: 1057 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 8 Data size: 968 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: min(_col1) keys: _col0 (type: string) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 10 Data size: 1057 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 8 Data size: 968 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false table: @@ -996,7 +900,7 @@ key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 10 Data size: 1057 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 8 Data size: 968 Basic stats: COMPLETE Column stats: NONE value expressions: _col1 (type: int) Reduce Operator Tree: Group By Operator @@ -1004,11 +908,11 @@ keys: KEY._col0 (type: string) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 5 Data size: 528 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 4 Data size: 484 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col1 (type: int), _col0 (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 5 Data size: 528 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 4 Data size: 484 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false table: @@ -1024,14 +928,14 @@ key expressions: _col5 (type: int), _col2 (type: string) sort order: ++ Map-reduce partition columns: _col5 (type: int), _col2 (type: string) - Statistics: Num rows: 16 Data size: 3490 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 28 Data size: 3461 Basic stats: COMPLETE Column stats: NONE value expressions: _col1 (type: string) TableScan Reduce Output Operator key expressions: _col0 (type: int), _col1 (type: string) sort order: ++ Map-reduce partition columns: _col0 (type: int), _col1 (type: string) - Statistics: Num rows: 5 Data size: 528 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 4 Data size: 484 Basic stats: COMPLETE Column stats: NONE Reduce Operator Tree: Join Operator condition map: @@ -1040,17 +944,17 @@ 0 {VALUE._col1} {KEY.reducesinkkey1} {KEY.reducesinkkey0} 1 {KEY.reducesinkkey0} outputColumnNames: _col1, _col2, _col5, _col12 - Statistics: Num rows: 17 Data size: 3839 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 30 Data size: 3807 Basic stats: COMPLETE 
Column stats: NONE Filter Operator predicate: _col12 is null (type: boolean) - Statistics: Num rows: 8 Data size: 1806 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 15 Data size: 1903 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col2 (type: string), _col1 (type: string), _col5 (type: int) outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 8 Data size: 1806 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 15 Data size: 1903 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 8 Data size: 1806 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 15 Data size: 1903 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -1061,31 +965,31 @@ Map Operator Tree: TableScan alias: part - Statistics: Num rows: 30 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: p_mfgr (type: string), p_size (type: int) sort order: ++ Map-reduce partition columns: p_mfgr (type: string) - Statistics: Num rows: 30 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE value expressions: p_mfgr (type: string), p_size (type: int) Reduce Operator Tree: Extract - Statistics: Num rows: 30 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE PTF Operator - Statistics: Num rows: 30 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: (_wcol0 <= 2) (type: boolean) - Statistics: Num rows: 10 Data size: 1057 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 8 Data size: 968 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col2 (type: string), _col5 (type: int) outputColumnNames: _col0, _col1 - Statistics: Num rows: 10 Data size: 1057 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 8 Data size: 968 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: min(_col1) keys: _col0 (type: string) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 10 Data size: 1057 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 8 Data size: 968 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false table: @@ -1101,7 +1005,7 @@ key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 10 Data size: 1057 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 8 Data size: 968 Basic stats: COMPLETE Column stats: NONE value expressions: _col1 (type: int) Reduce Operator Tree: Group By Operator @@ -1109,12 +1013,12 @@ keys: KEY._col0 (type: string) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 5 Data size: 528 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 4 Data size: 484 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: (_col1 is null or _col0 is null) (type: boolean) - Statistics: Num rows: 2 Data size: 211 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 242 Basic stats: COMPLETE Column 
stats: NONE Select Operator - Statistics: Num rows: 2 Data size: 211 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 242 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: count() mode: hash @@ -1165,10 +1069,10 @@ Map Operator Tree: TableScan alias: b - Statistics: Num rows: 15 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator sort order: - Statistics: Num rows: 15 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE value expressions: p_name (type: string), p_mfgr (type: string), p_size (type: int) TableScan Reduce Output Operator @@ -1182,7 +1086,7 @@ 0 {VALUE._col1} {VALUE._col2} {VALUE._col5} 1 outputColumnNames: _col1, _col2, _col5 - Statistics: Num rows: 16 Data size: 3490 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 28 Data size: 3461 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false table: Index: ql/src/test/results/clientpositive/subquery_notin_having.q.out =================================================================== --- ql/src/test/results/clientpositive/subquery_notin_having.q.out (revision 1637277) +++ ql/src/test/results/clientpositive/subquery_notin_having.q.out (working copy) @@ -1,45 +1,3 @@ -PREHOOK: query: DROP TABLE part -PREHOOK: type: DROPTABLE -POSTHOOK: query: DROP TABLE part -POSTHOOK: type: DROPTABLE -PREHOOK: query: -- data setup -CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@part -POSTHOOK: query: -- data setup -CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@part -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part -PREHOOK: type: LOAD -#### A masked pattern was here #### -PREHOOK: Output: default@part -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part -POSTHOOK: type: LOAD -#### A masked pattern was here #### -POSTHOOK: Output: default@part Warning: Shuffle Join JOIN[22][tables = [gby_sq1, sq_1_notin_nullcheck]] in Stage 'Stage-2:MAPRED' is a cross product PREHOOK: query: -- non agg, non corr explain @@ -271,22 +229,22 @@ Map Operator Tree: TableScan alias: b - Statistics: Num rows: 29 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: p_mfgr (type: string), p_retailprice (type: double) outputColumnNames: p_mfgr, p_retailprice - Statistics: Num rows: 29 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: min(p_retailprice), max(p_retailprice), avg(p_retailprice) keys: p_mfgr (type: string) mode: hash outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 29 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 
Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 29 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE value expressions: _col1 (type: double), _col2 (type: double), _col3 (type: struct) Reduce Operator Tree: Group By Operator @@ -294,7 +252,7 @@ keys: KEY._col0 (type: string) mode: mergepartial outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 29 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false table: @@ -308,7 +266,7 @@ TableScan Reduce Output Operator sort order: - Statistics: Num rows: 29 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE value expressions: _col0 (type: string), _col1 (type: double) TableScan Reduce Output Operator @@ -322,7 +280,7 @@ 0 {VALUE._col0} {VALUE._col1} 1 outputColumnNames: _col0, _col1 - Statistics: Num rows: 31 Data size: 3490 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 28 Data size: 3461 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false table: @@ -338,13 +296,13 @@ key expressions: _col0 (type: string), _col1 (type: double) sort order: ++ Map-reduce partition columns: _col0 (type: string), _col1 (type: double) - Statistics: Num rows: 31 Data size: 3490 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 28 Data size: 3461 Basic stats: COMPLETE Column stats: NONE TableScan Reduce Output Operator key expressions: _col0 (type: string), _col1 (type: double) sort order: ++ Map-reduce partition columns: _col0 (type: string), _col1 (type: double) - Statistics: Num rows: 4 Data size: 437 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 4 Data size: 484 Basic stats: COMPLETE Column stats: NONE Reduce Operator Tree: Join Operator condition map: @@ -353,17 +311,17 @@ 0 {KEY.reducesinkkey0} {KEY.reducesinkkey1} 1 {KEY.reducesinkkey0} outputColumnNames: _col0, _col1, _col4 - Statistics: Num rows: 34 Data size: 3839 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 30 Data size: 3807 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: _col4 is null (type: boolean) - Statistics: Num rows: 17 Data size: 1919 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 15 Data size: 1903 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: string), _col1 (type: double) outputColumnNames: _col0, _col1 - Statistics: Num rows: 17 Data size: 1919 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 15 Data size: 1903 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 17 Data size: 1919 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 15 Data size: 1903 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -374,22 +332,22 @@ Map Operator Tree: TableScan alias: part - Statistics: Num rows: 29 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: p_mfgr 
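These subquery_notin_having.q.out hunks show the same two-aggregation shape as the IN case above: one branch computes min/max/avg of p_retailprice per p_mfgr for the outer HAVING, the other re-aggregates and keeps only manufacturers whose price spread clears the threshold (the ((_col2 - _col1) > 600) filter visible just below), and the NOT IN nullcheck branch rides along as before. A query of roughly this shape would yield such a plan (a sketch inferred from the operators, not copied from the test file):

-- sketch inferred from the plan: HAVING ... NOT IN over a second aggregate
select b.p_mfgr, min(p_retailprice)
from part b
group by b.p_mfgr
having b.p_mfgr not in
  (select p_mfgr
   from part a
   group by p_mfgr
   having max(p_retailprice) - min(p_retailprice) > 600);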
(type: string), p_retailprice (type: double) outputColumnNames: p_mfgr, p_retailprice - Statistics: Num rows: 29 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: min(p_retailprice), max(p_retailprice), avg(p_retailprice) keys: p_mfgr (type: string) mode: hash outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 29 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 29 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE value expressions: _col1 (type: double), _col2 (type: double), _col3 (type: struct) Reduce Operator Tree: Group By Operator @@ -397,14 +355,14 @@ keys: KEY._col0 (type: string) mode: mergepartial outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 14 Data size: 1531 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: ((_col2 - _col1) > 600) (type: boolean) - Statistics: Num rows: 4 Data size: 437 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 4 Data size: 484 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: string), _col1 (type: double) outputColumnNames: _col0, _col1 - Statistics: Num rows: 4 Data size: 437 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 4 Data size: 484 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false table: @@ -417,22 +375,22 @@ Map Operator Tree: TableScan alias: part - Statistics: Num rows: 29 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: p_mfgr (type: string), p_retailprice (type: double) outputColumnNames: p_mfgr, p_retailprice - Statistics: Num rows: 29 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: min(p_retailprice), max(p_retailprice), avg(p_retailprice) keys: p_mfgr (type: string) mode: hash outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 29 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 29 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE value expressions: _col1 (type: double), _col2 (type: double), _col3 (type: struct) Reduce Operator Tree: Group By Operator @@ -440,12 +398,12 @@ keys: KEY._col0 (type: string) mode: mergepartial outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 14 Data size: 1531 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: ((_col0 is null or _col1 is null) and ((_col2 - _col1) > 600)) (type: boolean) - Statistics: Num rows: 2 Data 
size: 218 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 242 Basic stats: COMPLETE Column stats: NONE Select Operator - Statistics: Num rows: 2 Data size: 218 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 242 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: count() mode: hash @@ -562,22 +520,22 @@ Map Operator Tree: TableScan alias: b - Statistics: Num rows: 29 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: p_mfgr (type: string), p_retailprice (type: double) outputColumnNames: p_mfgr, p_retailprice - Statistics: Num rows: 29 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: min(p_retailprice), max(p_retailprice) keys: p_mfgr (type: string) mode: hash outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 29 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 29 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE value expressions: _col1 (type: double), _col2 (type: double) Reduce Operator Tree: Group By Operator @@ -585,7 +543,7 @@ keys: KEY._col0 (type: string) mode: mergepartial outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 29 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false table: @@ -599,7 +557,7 @@ TableScan Reduce Output Operator sort order: - Statistics: Num rows: 29 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE value expressions: _col0 (type: string), _col1 (type: double) TableScan Reduce Output Operator @@ -613,7 +571,7 @@ 0 {VALUE._col0} {VALUE._col1} 1 outputColumnNames: _col0, _col1 - Statistics: Num rows: 31 Data size: 3490 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 28 Data size: 3461 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false table: @@ -629,14 +587,14 @@ key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 31 Data size: 3490 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 28 Data size: 3461 Basic stats: COMPLETE Column stats: NONE value expressions: _col1 (type: double) TableScan Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 4 Data size: 437 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 4 Data size: 484 Basic stats: COMPLETE Column stats: NONE Reduce Operator Tree: Join Operator condition map: @@ -645,17 +603,17 @@ 0 {KEY.reducesinkkey0} {VALUE._col0} 1 {KEY.reducesinkkey0} outputColumnNames: _col0, _col1, _col3 - Statistics: Num rows: 34 Data size: 3839 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 30 Data size: 3807 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: _col3 is 
null (type: boolean) - Statistics: Num rows: 17 Data size: 1919 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 15 Data size: 1903 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: string), _col1 (type: double) outputColumnNames: _col0, _col1 - Statistics: Num rows: 17 Data size: 1919 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 15 Data size: 1903 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 17 Data size: 1919 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 15 Data size: 1903 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -666,25 +624,25 @@ Map Operator Tree: TableScan alias: a - Statistics: Num rows: 29 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: p_mfgr is null (type: boolean) - Statistics: Num rows: 14 Data size: 1531 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: null (type: void), p_retailprice (type: double) outputColumnNames: p_mfgr, p_retailprice - Statistics: Num rows: 14 Data size: 1531 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: max(p_retailprice), min(p_retailprice) keys: p_mfgr (type: string) mode: hash outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 14 Data size: 1531 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 14 Data size: 1531 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE value expressions: _col1 (type: double), _col2 (type: double) Reduce Operator Tree: Group By Operator @@ -692,12 +650,12 @@ keys: KEY._col0 (type: string) mode: mergepartial outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 7 Data size: 765 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 6 Data size: 726 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: ((_col1 - _col2) > 600) (type: boolean) - Statistics: Num rows: 2 Data size: 218 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 242 Basic stats: COMPLETE Column stats: NONE Select Operator - Statistics: Num rows: 2 Data size: 218 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 242 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: count() mode: hash @@ -748,22 +706,22 @@ Map Operator Tree: TableScan alias: a - Statistics: Num rows: 29 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: p_mfgr (type: string), p_retailprice (type: double) outputColumnNames: p_mfgr, p_retailprice - Statistics: Num rows: 29 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: 
max(p_retailprice), min(p_retailprice) keys: p_mfgr (type: string) mode: hash outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 29 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 29 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE value expressions: _col1 (type: double), _col2 (type: double) Reduce Operator Tree: Group By Operator @@ -771,14 +729,14 @@ keys: KEY._col0 (type: string) mode: mergepartial outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 14 Data size: 1531 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: ((_col1 - _col2) > 600) (type: boolean) - Statistics: Num rows: 4 Data size: 437 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 4 Data size: 484 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: string) outputColumnNames: _col0 - Statistics: Num rows: 4 Data size: 437 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 4 Data size: 484 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false table: Index: ql/src/test/results/clientpositive/subquery_unqualcolumnrefs.q.out =================================================================== --- ql/src/test/results/clientpositive/subquery_unqualcolumnrefs.q.out (revision 1637277) +++ ql/src/test/results/clientpositive/subquery_unqualcolumnrefs.q.out (working copy) @@ -1,37 +1,3 @@ -PREHOOK: query: DROP TABLE part -PREHOOK: type: DROPTABLE -POSTHOOK: query: DROP TABLE part -POSTHOOK: type: DROPTABLE -PREHOOK: query: -- data setup -CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@part -POSTHOOK: query: -- data setup -CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@part PREHOOK: query: create table src11 (key1 string, value1 string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default @@ -319,15 +285,15 @@ Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE TableScan alias: b - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: (p_size is not null and p_mfgr is not null) (type: boolean) - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: p_size (type: int), p_mfgr (type: string) sort order: ++ Map-reduce partition columns: p_size (type: int), p_mfgr (type: string) - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE value expressions: 
p_name (type: string) Reduce Operator Tree: Join Operator @@ -337,14 +303,14 @@ 0 {VALUE._col1} {KEY.reducesinkkey1} {KEY.reducesinkkey0} 1 outputColumnNames: _col1, _col2, _col5 - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Statistics: Num rows: 7 Data size: 931 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col2 (type: string), _col1 (type: string), _col5 (type: int) outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Statistics: Num rows: 7 Data size: 931 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Statistics: Num rows: 7 Data size: 931 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -384,31 +350,31 @@ Map Operator Tree: TableScan alias: part - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: p_mfgr (type: string), p_size (type: int) sort order: ++ Map-reduce partition columns: p_mfgr (type: string) - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE value expressions: p_mfgr (type: string), p_size (type: int) Reduce Operator Tree: Extract - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE PTF Operator - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: ((_wcol0 <= 2) and _col2 is not null) (type: boolean) - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Statistics: Num rows: 4 Data size: 484 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col2 (type: string), _col5 (type: int) outputColumnNames: _col0, _col1 - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Statistics: Num rows: 4 Data size: 484 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: min(_col1) keys: _col0 (type: string) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Statistics: Num rows: 4 Data size: 484 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false table: @@ -424,7 +390,7 @@ key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Statistics: Num rows: 4 Data size: 484 Basic stats: COMPLETE Column stats: NONE value expressions: _col1 (type: int) Reduce Operator Tree: Group By Operator @@ -432,19 +398,19 @@ keys: KEY._col0 (type: string) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Statistics: Num rows: 2 Data size: 242 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: _col1 is not null (type: boolean) - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Statistics: Num rows: 2 Data size: 242 Basic stats: COMPLETE 
Column stats: NONE Select Operator expressions: _col1 (type: int), _col0 (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Statistics: Num rows: 2 Data size: 242 Basic stats: COMPLETE Column stats: NONE Group By Operator keys: _col0 (type: int), _col1 (type: string) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Statistics: Num rows: 2 Data size: 242 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false table: @@ -457,22 +423,22 @@ Map Operator Tree: TableScan alias: b - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: (p_size is not null and p_mfgr is not null) (type: boolean) - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: p_size (type: int), p_mfgr (type: string) sort order: ++ Map-reduce partition columns: p_size (type: int), p_mfgr (type: string) - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE value expressions: p_name (type: string) TableScan Reduce Output Operator key expressions: _col0 (type: int), _col1 (type: string) sort order: ++ Map-reduce partition columns: _col0 (type: int), _col1 (type: string) - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Statistics: Num rows: 2 Data size: 242 Basic stats: COMPLETE Column stats: NONE Reduce Operator Tree: Join Operator condition map: @@ -481,14 +447,14 @@ 0 {VALUE._col1} {KEY.reducesinkkey1} {KEY.reducesinkkey0} 1 outputColumnNames: _col1, _col2, _col5 - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Statistics: Num rows: 7 Data size: 931 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col2 (type: string), _col1 (type: string), _col5 (type: int) outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Statistics: Num rows: 7 Data size: 931 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Statistics: Num rows: 7 Data size: 931 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -810,25 +776,25 @@ Map Operator Tree: TableScan alias: part - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: p_mfgr (type: string), p_size (type: int) sort order: ++ Map-reduce partition columns: p_mfgr (type: string) - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE value expressions: p_name (type: string), p_mfgr (type: string), p_size (type: int) Reduce Operator Tree: Extract - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE PTF Operator - Statistics: Num rows: 0 
Data size: 0 Basic stats: NONE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: (_wcol0 <= 2) (type: boolean) - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Statistics: Num rows: 8 Data size: 968 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col1 (type: string), _col2 (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Statistics: Num rows: 8 Data size: 968 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false table: @@ -844,14 +810,14 @@ key expressions: _col1 (type: string), _col2 (type: string) sort order: ++ Map-reduce partition columns: _col1 (type: string), _col2 (type: string) - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Statistics: Num rows: 28 Data size: 3461 Basic stats: COMPLETE Column stats: NONE value expressions: _col5 (type: int) TableScan Reduce Output Operator key expressions: _col0 (type: string), _col1 (type: string) sort order: ++ Map-reduce partition columns: _col0 (type: string), _col1 (type: string) - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Statistics: Num rows: 8 Data size: 968 Basic stats: COMPLETE Column stats: NONE Reduce Operator Tree: Join Operator condition map: @@ -860,17 +826,17 @@ 0 {KEY.reducesinkkey0} {KEY.reducesinkkey1} {VALUE._col3} 1 {KEY.reducesinkkey0} outputColumnNames: _col1, _col2, _col5, _col12 - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Statistics: Num rows: 30 Data size: 3807 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: _col12 is null (type: boolean) - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Statistics: Num rows: 15 Data size: 1903 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col2 (type: string), _col1 (type: string), _col5 (type: int) outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Statistics: Num rows: 15 Data size: 1903 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Statistics: Num rows: 15 Data size: 1903 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -881,23 +847,23 @@ Map Operator Tree: TableScan alias: part - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: p_mfgr (type: string), p_size (type: int) sort order: ++ Map-reduce partition columns: p_mfgr (type: string) - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE value expressions: p_name (type: string), p_mfgr (type: string), p_size (type: int) Reduce Operator Tree: Extract - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE PTF Operator - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE Filter 
Operator predicate: ((_wcol0 <= 2) and (_col1 is null or _col2 is null)) (type: boolean) - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Statistics: Num rows: 8 Data size: 968 Basic stats: COMPLETE Column stats: NONE Select Operator - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Statistics: Num rows: 8 Data size: 968 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: count() mode: hash @@ -948,10 +914,10 @@ Map Operator Tree: TableScan alias: b - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator sort order: - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE value expressions: p_name (type: string), p_mfgr (type: string), p_size (type: int) TableScan Reduce Output Operator @@ -965,7 +931,7 @@ 0 {VALUE._col1} {VALUE._col2} {VALUE._col5} 1 outputColumnNames: _col1, _col2, _col5 - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Statistics: Num rows: 28 Data size: 3461 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false table: Index: ql/src/test/results/clientpositive/temp_table.q.out =================================================================== --- ql/src/test/results/clientpositive/temp_table.q.out (revision 1637277) +++ ql/src/test/results/clientpositive/temp_table.q.out (working copy) @@ -430,9 +430,15 @@ bar bay baz +cbo_t1 +cbo_t2 +cbo_t3 foo +lineitem +part src src1 +src_cbo src_json src_sequencefile src_thrift Index: ql/src/test/results/clientpositive/temp_table_windowing_expressions.q.out =================================================================== --- ql/src/test/results/clientpositive/temp_table_windowing_expressions.q.out (revision 1637277) +++ ql/src/test/results/clientpositive/temp_table_windowing_expressions.q.out (working copy) @@ -1,45 +1,3 @@ -PREHOOK: query: DROP TABLE part -PREHOOK: type: DROPTABLE -POSTHOOK: query: DROP TABLE part -POSTHOOK: type: DROPTABLE -PREHOOK: query: -- data setup -CREATE TEMPORARY TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@part -POSTHOOK: query: -- data setup -CREATE TEMPORARY TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@part -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part -PREHOOK: type: LOAD -#### A masked pattern was here #### -PREHOOK: Output: default@part -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part -POSTHOOK: type: LOAD -#### A masked pattern was here #### -POSTHOOK: Output: default@part PREHOOK: query: drop table over10k PREHOOK: type: DROPTABLE POSTHOOK: query: drop table over10k Index: ql/src/test/results/clientpositive/tez/acid_vectorization_partition.q.out =================================================================== --- ql/src/test/results/clientpositive/tez/acid_vectorization_partition.q.out 
(revision 0)
+++ ql/src/test/results/clientpositive/tez/acid_vectorization_partition.q.out (working copy)
@@ -0,0 +1,60 @@
+PREHOOK: query: CREATE TABLE acid_vectorized_part(a INT, b STRING) partitioned by (ds string) CLUSTERED BY(a) INTO 2 BUCKETS STORED AS ORC TBLPROPERTIES ('transactional'='true')
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@acid_vectorized_part
+POSTHOOK: query: CREATE TABLE acid_vectorized_part(a INT, b STRING) partitioned by (ds string) CLUSTERED BY(a) INTO 2 BUCKETS STORED AS ORC TBLPROPERTIES ('transactional'='true')
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@acid_vectorized_part
+PREHOOK: query: insert into table acid_vectorized_part partition (ds = 'today') select cint, cstring1 from alltypesorc where cint is not null order by cint limit 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@alltypesorc
+PREHOOK: Output: default@acid_vectorized_part@ds=today
+POSTHOOK: query: insert into table acid_vectorized_part partition (ds = 'today') select cint, cstring1 from alltypesorc where cint is not null order by cint limit 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alltypesorc
+POSTHOOK: Output: default@acid_vectorized_part@ds=today
+POSTHOOK: Lineage: acid_vectorized_part PARTITION(ds=today).a SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: acid_vectorized_part PARTITION(ds=today).b SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+PREHOOK: query: insert into table acid_vectorized_part partition (ds = 'tomorrow') select cint, cstring1 from alltypesorc where cint is not null order by cint limit 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@alltypesorc
+PREHOOK: Output: default@acid_vectorized_part@ds=tomorrow
+POSTHOOK: query: insert into table acid_vectorized_part partition (ds = 'tomorrow') select cint, cstring1 from alltypesorc where cint is not null order by cint limit 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alltypesorc
+POSTHOOK: Output: default@acid_vectorized_part@ds=tomorrow
+POSTHOOK: Lineage: acid_vectorized_part PARTITION(ds=tomorrow).a SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: acid_vectorized_part PARTITION(ds=tomorrow).b SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+PREHOOK: query: select * from acid_vectorized_part order by a, b
+PREHOOK: type: QUERY
+PREHOOK: Input: default@acid_vectorized_part
+PREHOOK: Input: default@acid_vectorized_part@ds=today
+PREHOOK: Input: default@acid_vectorized_part@ds=tomorrow
+#### A masked pattern was here ####
+POSTHOOK: query: select * from acid_vectorized_part order by a, b
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@acid_vectorized_part
+POSTHOOK: Input: default@acid_vectorized_part@ds=today
+POSTHOOK: Input: default@acid_vectorized_part@ds=tomorrow
+#### A masked pattern was here ####
+-1073279343 oj1YrV5Wa today
+-1073279343 oj1YrV5Wa tomorrow
+-1073051226 A34p7oRr2WvUJNf today
+-1073051226 A34p7oRr2WvUJNf tomorrow
+-1072910839 0iqrc5 today
+-1072910839 0iqrc5 tomorrow
+-1072081801 dPkN74F7 today
+-1072081801 dPkN74F7 tomorrow
+-1072076362 2uLyD28144vklju213J1mr tomorrow
+-1072076362 2uLyD28144vklju213J1mr today
+-1071480828 aw724t8c5558x2xneC624 today
+-1071480828 aw724t8c5558x2xneC624 tomorrow
+-1071363017 Anj0oF today
+-1071363017 Anj0oF tomorrow
+-1070883071 0ruyd6Y50JpdGRf6HqD tomorrow
+-1070883071 0ruyd6Y50JpdGRf6HqD today
+-1070551679 iUR3Q today
+-1070551679 iUR3Q tomorrow
+-1069736047 k17Am8uPHWk02cEf1jet tomorrow
+-1069736047 k17Am8uPHWk02cEf1jet today
Index: ql/src/test/results/clientpositive/tez/acid_vectorization_project.q.out
===================================================================
--- ql/src/test/results/clientpositive/tez/acid_vectorization_project.q.out (revision 0)
+++ ql/src/test/results/clientpositive/tez/acid_vectorization_project.q.out (working copy)
@@ -0,0 +1,73 @@
+PREHOOK: query: CREATE TABLE acid_vectorized(a INT, b STRING, c float) CLUSTERED BY(a) INTO 2 BUCKETS STORED AS ORC TBLPROPERTIES ('transactional'='true')
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@acid_vectorized
+POSTHOOK: query: CREATE TABLE acid_vectorized(a INT, b STRING, c float) CLUSTERED BY(a) INTO 2 BUCKETS STORED AS ORC TBLPROPERTIES ('transactional'='true')
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@acid_vectorized
+PREHOOK: query: insert into table acid_vectorized select cint, cstring1, cfloat from alltypesorc where cint is not null order by cint limit 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@alltypesorc
+PREHOOK: Output: default@acid_vectorized
+POSTHOOK: query: insert into table acid_vectorized select cint, cstring1, cfloat from alltypesorc where cint is not null order by cint limit 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alltypesorc
+POSTHOOK: Output: default@acid_vectorized
+POSTHOOK: Lineage: acid_vectorized.a SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: acid_vectorized.b SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: acid_vectorized.c SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
+PREHOOK: query: select a,b from acid_vectorized order by a
+PREHOOK: type: QUERY
+PREHOOK: Input: default@acid_vectorized
+#### A masked pattern was here ####
+POSTHOOK: query: select a,b from acid_vectorized order by a
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@acid_vectorized
+#### A masked pattern was here ####
+-1073279343 oj1YrV5Wa
+-1073051226 A34p7oRr2WvUJNf
+-1072910839 0iqrc5
+-1072081801 dPkN74F7
+-1072076362 2uLyD28144vklju213J1mr
+-1071480828 aw724t8c5558x2xneC624
+-1071363017 Anj0oF
+-1070883071 0ruyd6Y50JpdGRf6HqD
+-1070551679 iUR3Q
+-1069736047 k17Am8uPHWk02cEf1jet
+PREHOOK: query: select a,c from acid_vectorized order by a
+PREHOOK: type: QUERY
+PREHOOK: Input: default@acid_vectorized
+#### A masked pattern was here ####
+POSTHOOK: query: select a,c from acid_vectorized order by a
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@acid_vectorized
+#### A masked pattern was here ####
+-1073279343 11.0
+-1073051226 NULL
+-1072910839 11.0
+-1072081801 NULL
+-1072076362 NULL
+-1071480828 -51.0
+-1071363017 8.0
+-1070883071 NULL
+-1070551679 NULL
+-1069736047 11.0
+PREHOOK: query: select b,c from acid_vectorized order by b
+PREHOOK: type: QUERY
+PREHOOK: Input: default@acid_vectorized
+#### A masked pattern was here ####
+POSTHOOK: query: select b,c from acid_vectorized order by b
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@acid_vectorized
+#### A masked pattern was here ####
+0iqrc5 11.0
+0ruyd6Y50JpdGRf6HqD NULL
+2uLyD28144vklju213J1mr NULL
+A34p7oRr2WvUJNf NULL
+Anj0oF 8.0
+aw724t8c5558x2xneC624 -51.0
+dPkN74F7 NULL
+iUR3Q NULL
+k17Am8uPHWk02cEf1jet 11.0
+oj1YrV5Wa 11.0
Index:
ql/src/test/results/clientpositive/tez/cbo_correctness.q.out =================================================================== --- ql/src/test/results/clientpositive/tez/cbo_correctness.q.out (revision 1637277) +++ ql/src/test/results/clientpositive/tez/cbo_correctness.q.out (working copy) @@ -1,19107 +0,0 @@ -PREHOOK: query: drop table if exists t1 -PREHOOK: type: DROPTABLE -POSTHOOK: query: drop table if exists t1 -POSTHOOK: type: DROPTABLE -PREHOOK: query: drop table if exists t2 -PREHOOK: type: DROPTABLE -POSTHOOK: query: drop table if exists t2 -POSTHOOK: type: DROPTABLE -PREHOOK: query: drop table if exists t3 -PREHOOK: type: DROPTABLE -POSTHOOK: query: drop table if exists t3 -POSTHOOK: type: DROPTABLE -PREHOOK: query: create table t1(key string, value string, c_int int, c_float float, c_boolean boolean) partitioned by (dt string) row format delimited fields terminated by ',' STORED AS TEXTFILE -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@t1 -POSTHOOK: query: create table t1(key string, value string, c_int int, c_float float, c_boolean boolean) partitioned by (dt string) row format delimited fields terminated by ',' STORED AS TEXTFILE -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@t1 -PREHOOK: query: create table t2(key string, value string, c_int int, c_float float, c_boolean boolean) partitioned by (dt string) row format delimited fields terminated by ',' STORED AS TEXTFILE -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@t2 -POSTHOOK: query: create table t2(key string, value string, c_int int, c_float float, c_boolean boolean) partitioned by (dt string) row format delimited fields terminated by ',' STORED AS TEXTFILE -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@t2 -PREHOOK: query: create table t3(key string, value string, c_int int, c_float float, c_boolean boolean) row format delimited fields terminated by ',' STORED AS TEXTFILE -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@t3 -POSTHOOK: query: create table t3(key string, value string, c_int int, c_float float, c_boolean boolean) row format delimited fields terminated by ',' STORED AS TEXTFILE -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@t3 -PREHOOK: query: load data local inpath '../../data/files/cbo_t1.txt' into table t1 partition (dt='2014') -PREHOOK: type: LOAD -#### A masked pattern was here #### -PREHOOK: Output: default@t1 -POSTHOOK: query: load data local inpath '../../data/files/cbo_t1.txt' into table t1 partition (dt='2014') -POSTHOOK: type: LOAD -#### A masked pattern was here #### -POSTHOOK: Output: default@t1 -POSTHOOK: Output: default@t1@dt=2014 -PREHOOK: query: load data local inpath '../../data/files/cbo_t2.txt' into table t2 partition (dt='2014') -PREHOOK: type: LOAD -#### A masked pattern was here #### -PREHOOK: Output: default@t2 -POSTHOOK: query: load data local inpath '../../data/files/cbo_t2.txt' into table t2 partition (dt='2014') -POSTHOOK: type: LOAD -#### A masked pattern was here #### -POSTHOOK: Output: default@t2 -POSTHOOK: Output: default@t2@dt=2014 -PREHOOK: query: load data local inpath '../../data/files/cbo_t3.txt' into table t3 -PREHOOK: type: LOAD -#### A masked pattern was here #### -PREHOOK: Output: default@t3 -POSTHOOK: query: load data local inpath '../../data/files/cbo_t3.txt' into table t3 -POSTHOOK: type: LOAD -#### A masked 
pattern was here #### -POSTHOOK: Output: default@t3 -PREHOOK: query: CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@part -POSTHOOK: query: CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@part -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part -PREHOOK: type: LOAD -#### A masked pattern was here #### -PREHOOK: Output: default@part -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part -POSTHOOK: type: LOAD -#### A masked pattern was here #### -POSTHOOK: Output: default@part -PREHOOK: query: DROP TABLE lineitem -PREHOOK: type: DROPTABLE -POSTHOOK: query: DROP TABLE lineitem -POSTHOOK: type: DROPTABLE -PREHOOK: query: CREATE TABLE lineitem (L_ORDERKEY INT, - L_PARTKEY INT, - L_SUPPKEY INT, - L_LINENUMBER INT, - L_QUANTITY DOUBLE, - L_EXTENDEDPRICE DOUBLE, - L_DISCOUNT DOUBLE, - L_TAX DOUBLE, - L_RETURNFLAG STRING, - L_LINESTATUS STRING, - l_shipdate STRING, - L_COMMITDATE STRING, - L_RECEIPTDATE STRING, - L_SHIPINSTRUCT STRING, - L_SHIPMODE STRING, - L_COMMENT STRING) -ROW FORMAT DELIMITED -FIELDS TERMINATED BY '|' -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@lineitem -POSTHOOK: query: CREATE TABLE lineitem (L_ORDERKEY INT, - L_PARTKEY INT, - L_SUPPKEY INT, - L_LINENUMBER INT, - L_QUANTITY DOUBLE, - L_EXTENDEDPRICE DOUBLE, - L_DISCOUNT DOUBLE, - L_TAX DOUBLE, - L_RETURNFLAG STRING, - L_LINESTATUS STRING, - l_shipdate STRING, - L_COMMITDATE STRING, - L_RECEIPTDATE STRING, - L_SHIPINSTRUCT STRING, - L_SHIPMODE STRING, - L_COMMENT STRING) -ROW FORMAT DELIMITED -FIELDS TERMINATED BY '|' -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@lineitem -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/lineitem.txt' OVERWRITE INTO TABLE lineitem -PREHOOK: type: LOAD -#### A masked pattern was here #### -PREHOOK: Output: default@lineitem -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/lineitem.txt' OVERWRITE INTO TABLE lineitem -POSTHOOK: type: LOAD -#### A masked pattern was here #### -POSTHOOK: Output: default@lineitem -PREHOOK: query: create table src_cbo as select * from src -PREHOOK: type: CREATETABLE_AS_SELECT -PREHOOK: Input: default@src -PREHOOK: Output: database:default -PREHOOK: Output: default@src_cbo -POSTHOOK: query: create table src_cbo as select * from src -POSTHOOK: type: CREATETABLE_AS_SELECT -POSTHOOK: Input: default@src -POSTHOOK: Output: database:default -POSTHOOK: Output: default@src_cbo -PREHOOK: query: analyze table t1 partition (dt) compute statistics -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -PREHOOK: Output: default@t1 -PREHOOK: Output: default@t1@dt=2014 -POSTHOOK: query: analyze table t1 partition (dt) compute statistics -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -POSTHOOK: Output: default@t1 -POSTHOOK: Output: default@t1@dt=2014 -PREHOOK: query: analyze table t1 compute statistics for columns key, value, c_int, c_float, c_boolean -PREHOOK: 
type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: analyze table t1 compute statistics for columns key, value, c_int, c_float, c_boolean -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -PREHOOK: query: analyze table t2 partition (dt) compute statistics -PREHOOK: type: QUERY -PREHOOK: Input: default@t2 -PREHOOK: Input: default@t2@dt=2014 -PREHOOK: Output: default@t2 -PREHOOK: Output: default@t2@dt=2014 -POSTHOOK: query: analyze table t2 partition (dt) compute statistics -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t2 -POSTHOOK: Input: default@t2@dt=2014 -POSTHOOK: Output: default@t2 -POSTHOOK: Output: default@t2@dt=2014 -PREHOOK: query: analyze table t2 compute statistics for columns key, value, c_int, c_float, c_boolean -PREHOOK: type: QUERY -PREHOOK: Input: default@t2 -PREHOOK: Input: default@t2@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: analyze table t2 compute statistics for columns key, value, c_int, c_float, c_boolean -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t2 -POSTHOOK: Input: default@t2@dt=2014 -#### A masked pattern was here #### -PREHOOK: query: analyze table t3 compute statistics -PREHOOK: type: QUERY -PREHOOK: Input: default@t3 -PREHOOK: Output: default@t3 -POSTHOOK: query: analyze table t3 compute statistics -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t3 -POSTHOOK: Output: default@t3 -PREHOOK: query: analyze table t3 compute statistics for columns key, value, c_int, c_float, c_boolean -PREHOOK: type: QUERY -PREHOOK: Input: default@t3 -#### A masked pattern was here #### -POSTHOOK: query: analyze table t3 compute statistics for columns key, value, c_int, c_float, c_boolean -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t3 -#### A masked pattern was here #### -PREHOOK: query: analyze table src_cbo compute statistics -PREHOOK: type: QUERY -PREHOOK: Input: default@src_cbo -PREHOOK: Output: default@src_cbo -POSTHOOK: query: analyze table src_cbo compute statistics -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src_cbo -POSTHOOK: Output: default@src_cbo -PREHOOK: query: analyze table src_cbo compute statistics for columns -PREHOOK: type: QUERY -PREHOOK: Input: default@src_cbo -#### A masked pattern was here #### -POSTHOOK: query: analyze table src_cbo compute statistics for columns -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src_cbo -#### A masked pattern was here #### -PREHOOK: query: analyze table part compute statistics -PREHOOK: type: QUERY -PREHOOK: Input: default@part -PREHOOK: Output: default@part -POSTHOOK: query: analyze table part compute statistics -POSTHOOK: type: QUERY -POSTHOOK: Input: default@part -POSTHOOK: Output: default@part -PREHOOK: query: analyze table part compute statistics for columns -PREHOOK: type: QUERY -PREHOOK: Input: default@part -#### A masked pattern was here #### -POSTHOOK: query: analyze table part compute statistics for columns -POSTHOOK: type: QUERY -POSTHOOK: Input: default@part -#### A masked pattern was here #### -PREHOOK: query: analyze table lineitem compute statistics -PREHOOK: type: QUERY -PREHOOK: Input: default@lineitem -PREHOOK: Output: default@lineitem -POSTHOOK: query: analyze table lineitem compute statistics -POSTHOOK: type: QUERY -POSTHOOK: Input: default@lineitem -POSTHOOK: Output: default@lineitem -PREHOOK: query: analyze table lineitem compute statistics for columns -PREHOOK: type: QUERY -PREHOOK: Input: default@lineitem -#### A 
masked pattern was here #### -POSTHOOK: query: analyze table lineitem compute statistics for columns -POSTHOOK: type: QUERY -POSTHOOK: Input: default@lineitem -#### A masked pattern was here #### -PREHOOK: query: -- 1. Test Select + TS -select * from t1 -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: -- 1. Test Select + TS -select * from t1 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 false 2014 -1 1 1 1.0 false 2014 -null null NULL NULL NULL 2014 -null null NULL NULL NULL 2014 -PREHOOK: query: select * from t1 as t1 -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: select * from t1 as t1 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 false 2014 -1 1 1 1.0 false 2014 -null null NULL NULL NULL 2014 -null null NULL NULL NULL 2014 -PREHOOK: query: select * from t1 as t2 -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: select * from t1 as t2 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 false 2014 -1 1 1 1.0 false 2014 -null null NULL NULL NULL 2014 -null null NULL NULL NULL 2014 -PREHOOK: query: select t1.key as x, c_int as c_int, (((c_int+c_float)*10)+5) as y from t1 -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: select t1.key as x, c_int as c_int, (((c_int+c_float)*10)+5) as y from t1 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -1 1 25.0 -1 1 25.0 -1 1 25.0 -1 1 25.0 -1 1 25.0 -1 1 25.0 -1 1 25.0 -1 1 25.0 -1 1 25.0 -1 1 25.0 - 1 1 25.0 - 1 1 25.0 - 1 1 25.0 - 1 1 25.0 -1 1 25.0 -1 1 25.0 -1 1 25.0 -1 1 25.0 -null NULL NULL -null NULL NULL -PREHOOK: query: -- 2. Test Select + TS + FIL -select * from t1 where t1.c_int >= 0 -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: -- 2. 
Test Select + TS + FIL -select * from t1 where t1.c_int >= 0 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 false 2014 -1 1 1 1.0 false 2014 -PREHOOK: query: select * from t1 as t1 where t1.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100 -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: select * from t1 as t1 where t1.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 false 2014 -1 1 1 1.0 false 2014 -PREHOOK: query: select * from t1 as t2 where t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100 -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: select * from t1 as t2 where t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 false 2014 -1 1 1 1.0 false 2014 -PREHOOK: query: select t2.key as x, c_int as c_int, (((c_int+c_float)*10)+5) as y from t1 as t2 where t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100 -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: select t2.key as x, c_int as c_int, (((c_int+c_float)*10)+5) as y from t1 as t2 where t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -1 1 25.0 -1 1 25.0 -1 1 25.0 -1 1 25.0 -1 1 25.0 -1 1 25.0 -1 1 25.0 -1 1 25.0 -1 1 25.0 -1 1 25.0 - 1 1 25.0 - 1 1 25.0 - 1 1 25.0 - 1 1 25.0 -1 1 25.0 -1 1 25.0 -1 1 25.0 -1 1 25.0 -PREHOOK: query: -- 3 Test Select + Select + TS + FIL -select * from (select * from t1 where t1.c_int >= 0) as t1 -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: -- 3 Test Select + Select + TS + FIL -select * from (select * from t1 where t1.c_int >= 0) as t1 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 
[... 14 rows: "1 1 1 1.0 true 2014" ×12 (4 with key " 1"), "1 1 1 1.0 false 2014" ×2 ...]
-PREHOOK: query: select * from (select * from t1 as t1 where t1.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as t1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t1@dt=2014
-#### A masked pattern was here ####
-POSTHOOK: query: select * from (select * from t1 as t1 where t1.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as t1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t1@dt=2014
-#### A masked pattern was here ####
[... 18 rows: "1 1 1 1.0 true 2014" ×16 (4 with key " 1"), "1 1 1 1.0 false 2014" ×2 ...]
-PREHOOK: query: select * from (select * from t1 as t2 where t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as t1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t1@dt=2014
-#### A masked pattern was here ####
-POSTHOOK: query: select * from (select * from t1 as t2 where t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as t1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t1@dt=2014
-#### A masked pattern was here ####
[... 18 rows: "1 1 1 1.0 true 2014" ×16 (4 with key " 1"), "1 1 1 1.0 false 2014" ×2 ...]
-PREHOOK: query: select * from (select t2.key as x, c_int as c_int, (((c_int+c_float)*10)+5) as y from t1 as t2 where t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as t1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t1@dt=2014
-#### A masked pattern was here ####
-POSTHOOK: query: select * from (select t2.key as x, c_int as c_int, (((c_int+c_float)*10)+5) as y from t1 as t2 where t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as t1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t1@dt=2014
-#### A masked pattern was here ####
[... 18 rows: "1 1 25.0" (4 with key " 1") ...]
-PREHOOK: query: select * from (select * from t1 where t1.c_int >= 0) as t1 where t1.c_int >= 0
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t1@dt=2014
-#### A masked pattern was here ####
-POSTHOOK: query: select * from (select * from t1 where t1.c_int >= 0) as t1 where t1.c_int >= 0
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t1@dt=2014
-#### A masked pattern was here ####
[... 18 rows: "1 1 1 1.0 true 2014" ×16 (4 with key " 1"), "1 1 1 1.0 false 2014" ×2 ...]
-PREHOOK: query: select * from (select * from t1 as t1 where t1.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as t1 where t1.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t1@dt=2014
-#### A masked pattern was here ####
-POSTHOOK: query: select * from (select * from t1 as t1 where t1.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as t1 where t1.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t1@dt=2014
-#### A masked pattern was here ####
[... 18 rows: "1 1 1 1.0 true 2014" ×16 (4 with key " 1"), "1 1 1 1.0 false 2014" ×2 ...]
-PREHOOK: query: select * from (select * from t1 as t2 where t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as t2 where t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t1@dt=2014
-#### A masked pattern was here ####
-POSTHOOK: query: select * from (select * from t1 as t2 where t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as t2 where t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t1@dt=2014
-#### A masked pattern was here ####
[... 18 rows: "1 1 1 1.0 true 2014" ×16 (4 with key " 1"), "1 1 1 1.0 false 2014" ×2 ...]
-PREHOOK: query: select * from (select t2.key as x, c_int as c_int, (((c_int+c_float)*10)+5) as y from t1 as t2 where t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as t1 where t1.c_int >= 0 and y+c_int >= 0 or x <= 100
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t1@dt=2014
-#### A masked pattern was here ####
-POSTHOOK: query: select * from (select t2.key as x, c_int as c_int, (((c_int+c_float)*10)+5) as y from t1 as t2 where t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as t1 where t1.c_int >= 0 and y+c_int >= 0 or x <= 100
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t1@dt=2014
-#### A masked pattern was here ####
[... 18 rows: "1 1 25.0" (4 with key " 1") ...]
-PREHOOK: query: select t1.c_int+c_float as x , c_int as c_int, (((c_int+c_float)*10)+5) as y from (select * from t1 where t1.c_int >= 0) as t1 where t1.c_int >= 0
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t1@dt=2014
-#### A masked pattern was here ####
-POSTHOOK: query: select t1.c_int+c_float as x , c_int as c_int, (((c_int+c_float)*10)+5) as y from (select * from t1 where t1.c_int >= 0) as t1 where t1.c_int >= 0
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t1@dt=2014
-#### A masked pattern was here ####
[... 18 rows: "2.0 1 25.0" ...]
-PREHOOK: query: select t2.c_int+c_float as x , c_int as c_int, (((c_int+c_float)*10)+5) as y from (select * from t1 where t1.c_int >= 0) as t2 where t2.c_int >= 0
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t1@dt=2014
-#### A masked pattern was here ####
-POSTHOOK: query: select t2.c_int+c_float as x , c_int as c_int, (((c_int+c_float)*10)+5) as y from (select * from t1 where t1.c_int >= 0) as t2 where t2.c_int >= 0
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t1@dt=2014
-#### A masked pattern was here ####
[... 18 rows: "2.0 1 25.0" ...]
-PREHOOK: query: -- 4. Test Select + Join + TS
-select t1.c_int, t2.c_int from t1 join t2 on t1.key=t2.key
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t1@dt=2014
-PREHOOK: Input: default@t2
-PREHOOK: Input: default@t2@dt=2014
-#### A masked pattern was here ####
-POSTHOOK: query: -- 4. Test Select + Join + TS
-select t1.c_int, t2.c_int from t1 join t2 on t1.key=t2.key
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t1@dt=2014
-POSTHOOK: Input: default@t2
-POSTHOOK: Input: default@t2@dt=2014
-#### A masked pattern was here ####
[... rows: "1 1" repeated, then "NULL NULL" ×4 ...]
-PREHOOK: query: select t1.key from t1 join t3
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t1@dt=2014
-PREHOOK: Input: default@t3
-#### A masked pattern was here ####
-POSTHOOK: query: select t1.key from t1 join t3
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t1@dt=2014
-POSTHOOK: Input: default@t3
-#### A masked pattern was here ####
[... rows: "null" repeated, then repeated "1" rows including a run with key " 1" ...]
-PREHOOK: query: select t1.key from t1 join t3 where t1.key=t3.key and t1.key >= 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t1@dt=2014
-PREHOOK: Input: default@t3
-#### A masked pattern was here ####
-POSTHOOK: query: select t1.key from t1 join t3 where t1.key=t3.key and t1.key >= 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t1@dt=2014
-POSTHOOK: Input: default@t3
-#### A masked pattern was here ####
[... rows: " 1" ×8, then repeated "1" ...]
-PREHOOK: query: select t1.c_int, t2.c_int from t1 left outer join t2 on t1.key=t2.key
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t1@dt=2014
-PREHOOK: Input: default@t2
-PREHOOK: Input: default@t2@dt=2014
-#### A masked pattern was here ####
-POSTHOOK: query: select t1.c_int, t2.c_int from t1 left outer join t2 on t1.key=t2.key
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t1@dt=2014
-POSTHOOK: Input: default@t2
-POSTHOOK: Input: default@t2@dt=2014
-#### A masked pattern was here ####
[... rows: "1 1" repeated, then "NULL NULL" ×4 ...]
-PREHOOK: query: select t1.c_int, t2.c_int from t1 right outer join t2 on t1.key=t2.key
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t1@dt=2014
-PREHOOK: Input: default@t2
-PREHOOK: Input: default@t2@dt=2014
-#### A masked pattern was here ####
-POSTHOOK: query: select t1.c_int, t2.c_int from t1 right outer join t2 on t1.key=t2.key
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t1@dt=2014
-POSTHOOK: Input: default@t2
-POSTHOOK: Input: default@t2@dt=2014
-#### A masked pattern was here ####
[... rows: "1 1" repeated, then "NULL 2" ×5, "NULL NULL" ×4 ...]
-PREHOOK: query: select t1.c_int, t2.c_int from t1 full outer join t2 on t1.key=t2.key
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t1@dt=2014
-PREHOOK: Input: default@t2
-PREHOOK: Input: default@t2@dt=2014
-#### A masked pattern was here ####
-POSTHOOK: query: select t1.c_int, t2.c_int from t1 full outer join t2 on t1.key=t2.key
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t1@dt=2014
-POSTHOOK: Input: default@t2
-POSTHOOK: Input: default@t2@dt=2014
-#### A masked pattern was here ####
[... rows: "1 1" repeated, then "NULL 2" ×5, "NULL NULL" ×4 ...]
-PREHOOK: query: select b, t1.c, t2.p, q, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1) t1 join (select t2.key as p, t2.c_int as q, c_float as r from t2) t2 on t1.a=p join t3 on t1.a=key
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t1@dt=2014
-PREHOOK: Input: default@t2
-PREHOOK: Input: default@t2@dt=2014
-PREHOOK: Input: default@t3
-#### A masked pattern was here ####
-POSTHOOK: query: select b, t1.c, t2.p, q, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1) t1 join (select t2.key as p, t2.c_int as q, c_float as r from t2) t2 on t1.a=p join t3 on t1.a=key
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t1@dt=2014
-POSTHOOK: Input: default@t2
-POSTHOOK: Input: default@t2@dt=2014
-POSTHOOK: Input: default@t3
-#### A masked pattern was here ####
[... rows: "1 1.0 1 1 1" repeated, then "NULL NULL null NULL NULL" ×8 ...]
-PREHOOK: query: select key, t1.c_int, t2.p, q from t1 join (select t2.key as p, t2.c_int as q, c_float as r from t2) t2 on t1.key=p join (select key as a, c_int as b, t3.c_float as c from t3)t3 on t1.key=a
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t1@dt=2014
-PREHOOK: Input: default@t2
-PREHOOK: Input: default@t2@dt=2014
-PREHOOK: Input: default@t3
-#### A masked pattern was here ####
-POSTHOOK: query: select key, t1.c_int, t2.p, q from t1 join (select t2.key as p, t2.c_int as q, c_float as r from t2) t2 on t1.key=p join (select key as a, c_int as b, t3.c_float as c from t3)t3 on t1.key=a
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t1@dt=2014
-POSTHOOK: Input: default@t2
-POSTHOOK: Input: default@t2@dt=2014
-POSTHOOK: Input: default@t3
-#### A masked pattern was here ####
[... rows: " 1 1 1 1" ×16, then "1 1 1 1" repeated, then "null NULL null NULL" ×8 ...]
-PREHOOK: query: select a, t1.b, key, t2.c_int, t3.p from (select key as a, c_int as b, t1.c_float as c from t1) t1 join t2 on t1.a=key join (select key as p, c_int as q, t3.c_float as r from t3)t3 on t1.a=t3.p
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t1@dt=2014
-PREHOOK: Input: default@t2
-PREHOOK: Input: default@t2@dt=2014
-PREHOOK: Input: default@t3
-#### A masked pattern was here ####
-POSTHOOK: query: select a, t1.b, key, t2.c_int, t3.p from (select key as a, c_int as b, t1.c_float as c from t1) t1 join t2 on t1.a=key join (select key as p, c_int as q, t3.c_float as r from t3)t3 on t1.a=t3.p
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t1@dt=2014
-POSTHOOK: Input: default@t2
-POSTHOOK: Input: default@t2@dt=2014
-POSTHOOK: Input: default@t3
-#### A masked pattern was here ####
[... rows: " 1 1 1 1 1" ×16, then "1 1 1 1 1" repeated, then "null NULL null NULL null" ×8 ...]
-PREHOOK: query: select b, t1.c, t2.c_int, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1) t1 join t2 on t1.a=t2.key join t3 on t1.a=t3.key
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t1@dt=2014
-PREHOOK: Input: default@t2
-PREHOOK: Input: default@t2@dt=2014
-PREHOOK: Input: default@t3
-#### A masked pattern was here ####
-POSTHOOK: query: select b, t1.c, t2.c_int, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1) t1 join t2 on t1.a=t2.key join t3 on t1.a=t3.key
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t1@dt=2014
-POSTHOOK: Input: default@t2
-POSTHOOK: Input: default@t2@dt=2014
-POSTHOOK: Input: default@t3
-#### A masked pattern was here ####
[... rows: "1 1.0 1 1" repeated, then "NULL NULL NULL NULL" ×8 ...]
-PREHOOK: query: select t3.c_int, b, t2.c_int, t1.c from (select key as a, c_int as b, t1.c_float as c from t1) t1 join t2 on t1.a=t2.key join t3 on t1.a=t3.key
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t1@dt=2014
-PREHOOK: Input: default@t2
-PREHOOK: Input: default@t2@dt=2014
-PREHOOK: Input: default@t3
-#### A masked pattern was here ####
-POSTHOOK: query: select t3.c_int, b, t2.c_int, t1.c from (select key as a, c_int as b, t1.c_float as c from t1) t1 join t2 on t1.a=t2.key join t3 on t1.a=t3.key
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t1@dt=2014
-POSTHOOK: Input: default@t2
-POSTHOOK: Input: default@t2@dt=2014
-POSTHOOK: Input: default@t3
-#### A masked pattern was here ####
[... rows: "1 1 1 1.0" repeated, then "NULL NULL NULL NULL" ×8 ...]
-PREHOOK: query: select b, t1.c, t2.p, q, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1) t1 left outer join (select t2.key as p, t2.c_int as q, c_float as r from t2) t2 on t1.a=p join t3 on t1.a=key
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t1@dt=2014
-PREHOOK: Input: default@t2
-PREHOOK: Input: default@t2@dt=2014
-PREHOOK: Input: default@t3
-#### A masked pattern was here ####
-POSTHOOK: query: select b, t1.c, t2.p, q, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1) t1 left outer join (select t2.key as p, t2.c_int as q, c_float as r from t2) t2 on t1.a=p join t3 on t1.a=key
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t1@dt=2014
-POSTHOOK: Input: default@t2
-POSTHOOK: Input: default@t2@dt=2014
-POSTHOOK: Input: default@t3
-#### A masked pattern was here ####
[... rows: "1 1.0 1 1 1" repeated, then "NULL NULL null NULL NULL" ×8 ...]
-PREHOOK: query: select key, t1.c_int, t2.p, q from t1 join (select t2.key as p, t2.c_int as q, c_float as r from t2) t2 on t1.key=p left outer join (select key as a, c_int as b, t3.c_float as c from t3)t3 on t1.key=a
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t1@dt=2014
-PREHOOK: Input: default@t2
-PREHOOK: Input: default@t2@dt=2014
-PREHOOK: Input: default@t3
-#### A masked pattern was here ####
-POSTHOOK: query: select key, t1.c_int, t2.p, q from t1 join (select t2.key as p, t2.c_int as q, c_float as r from t2) t2 on t1.key=p left outer join (select key as a, c_int as b, t3.c_float as c from t3)t3 on t1.key=a
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t1@dt=2014
-POSTHOOK: Input: default@t2
-POSTHOOK: Input: default@t2@dt=2014
-POSTHOOK: Input: default@t3
-#### A masked pattern was here ####
[... rows: " 1 1 1 1" ×16, then "1 1 1 1" repeated, then "null NULL null NULL" ×8 ...]
-PREHOOK: query: select b, t1.c, t2.p, q, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1) t1 right outer join (select t2.key as p, t2.c_int as q, c_float as r from t2) t2 on t1.a=p join t3 on t1.a=key
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t1@dt=2014
-PREHOOK: Input: default@t2
-PREHOOK: Input: default@t2@dt=2014
-PREHOOK: Input: default@t3
-#### A masked pattern was here ####
-POSTHOOK: query: select b, t1.c, t2.p, q, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1) t1 right outer join (select t2.key as p, t2.c_int as q, c_float as r from t2) t2 on t1.a=p join t3 on t1.a=key
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t1@dt=2014
-POSTHOOK: Input: default@t2
-POSTHOOK: Input: default@t2@dt=2014
-POSTHOOK: Input: default@t3
-#### A masked pattern was here ####
[... rows: "1 1.0 1 1 1" repeated, then "NULL NULL null NULL NULL" ×8 ...]
-PREHOOK: query: select key, t1.c_int, t2.p, q from t1 join (select t2.key as p, t2.c_int as q, c_float as r from t2) t2 on t1.key=p right outer
join (select key as a, c_int as b, t3.c_float as c from t3)t3 on t1.key=a -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -PREHOOK: Input: default@t2 -PREHOOK: Input: default@t2@dt=2014 -PREHOOK: Input: default@t3 -#### A masked pattern was here #### -POSTHOOK: query: select key, t1.c_int, t2.p, q from t1 join (select t2.key as p, t2.c_int as q, c_float as r from t2) t2 on t1.key=p right outer join (select key as a, c_int as b, t3.c_float as c from t3)t3 on t1.key=a -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -POSTHOOK: Input: default@t2 -POSTHOOK: Input: default@t2@dt=2014 -POSTHOOK: Input: default@t3 -#### A masked pattern was here #### - 1 1 1 1 - 1 1 1 1 - 1 1 1 1 - 1 1 1 1 - 1 1 1 1 - 1 1 1 1 - 1 1 1 1 - 1 1 1 1 - 1 1 1 1 - 1 1 1 1 - 1 1 1 1 - 1 1 1 1 - 1 1 1 1 - 1 1 1 1 - 1 1 1 1 - 1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 
-1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -NULL NULL NULL NULL -NULL NULL NULL NULL -NULL NULL NULL NULL -NULL NULL NULL NULL -NULL NULL NULL NULL -NULL NULL NULL NULL -null NULL null NULL -null NULL null NULL -null NULL null NULL -null NULL null NULL -null NULL null NULL -null NULL null NULL -null NULL null NULL -null NULL null NULL -PREHOOK: query: select b, t1.c, t2.p, q, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1) t1 full outer join (select t2.key as p, t2.c_int as q, c_float as r from t2) t2 on t1.a=p join t3 on t1.a=key -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -PREHOOK: Input: default@t2 -PREHOOK: Input: default@t2@dt=2014 -PREHOOK: Input: default@t3 -#### A masked pattern was here #### -POSTHOOK: query: select b, t1.c, t2.p, q, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1) t1 full outer join (select t2.key as p, t2.c_int as q, c_float as r from t2) t2 on t1.a=p join t3 on t1.a=key -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -POSTHOOK: Input: default@t2 -POSTHOOK: Input: default@t2@dt=2014 -POSTHOOK: Input: default@t3 -#### A masked pattern was here #### -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 
1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 
-1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -NULL NULL null NULL NULL -NULL NULL null NULL NULL -NULL NULL null NULL NULL -NULL NULL null NULL NULL -NULL NULL null NULL NULL -NULL NULL null NULL NULL -NULL NULL null NULL NULL -NULL NULL null NULL NULL -PREHOOK: query: select key, t1.c_int, t2.p, q from t1 join (select t2.key as p, t2.c_int as q, c_float as r from t2) t2 on t1.key=p full outer join (select key as a, c_int as b, t3.c_float as c from t3)t3 on t1.key=a -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -PREHOOK: Input: default@t2 -PREHOOK: Input: default@t2@dt=2014 -PREHOOK: Input: default@t3 -#### A masked pattern was here #### -POSTHOOK: query: select key, t1.c_int, 
t2.p, q from t1 join (select t2.key as p, t2.c_int as q, c_float as r from t2) t2 on t1.key=p full outer join (select key as a, c_int as b, t3.c_float as c from t3)t3 on t1.key=a -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -POSTHOOK: Input: default@t2 -POSTHOOK: Input: default@t2@dt=2014 -POSTHOOK: Input: default@t3 -#### A masked pattern was here #### - 1 1 1 1 - 1 1 1 1 - 1 1 1 1 - 1 1 1 1 - 1 1 1 1 - 1 1 1 1 - 1 1 1 1 - 1 1 1 1 - 1 1 1 1 - 1 1 1 1 - 1 1 1 1 - 1 1 1 1 - 1 1 1 1 - 1 1 1 1 - 1 1 1 1 - 1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 
1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -NULL NULL NULL NULL -NULL NULL NULL NULL -NULL NULL NULL NULL -NULL NULL NULL NULL -NULL NULL NULL NULL -NULL NULL NULL NULL -null NULL null NULL -null NULL null NULL -null NULL null NULL -null NULL null NULL -null NULL null NULL -null NULL null NULL -null NULL null NULL -null NULL null NULL -PREHOOK: query: -- 5. Test Select + Join + FIL + TS -select t1.c_int, t2.c_int from t1 join t2 on t1.key=t2.key where (t1.c_int + t2.c_int == 2) and (t1.c_int > 0 or t2.c_float >= 0) -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -PREHOOK: Input: default@t2 -PREHOOK: Input: default@t2@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: -- 5. 
Test Select + Join + FIL + TS -select t1.c_int, t2.c_int from t1 join t2 on t1.key=t2.key where (t1.c_int + t2.c_int == 2) and (t1.c_int > 0 or t2.c_float >= 0) -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -POSTHOOK: Input: default@t2 -POSTHOOK: Input: default@t2@dt=2014 -#### A masked pattern was here #### -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -PREHOOK: query: select t1.c_int, t2.c_int from t1 left outer join t2 on t1.key=t2.key where (t1.c_int + t2.c_int == 2) and (t1.c_int > 0 or t2.c_float >= 0) -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -PREHOOK: Input: default@t2 -PREHOOK: Input: default@t2@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: select t1.c_int, t2.c_int from t1 left outer join t2 on t1.key=t2.key where (t1.c_int + t2.c_int == 2) and (t1.c_int > 0 or t2.c_float >= 0) -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -POSTHOOK: Input: default@t2 -POSTHOOK: Input: default@t2@dt=2014 -#### A masked pattern was here #### -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -PREHOOK: query: select t1.c_int, t2.c_int from t1 right outer join t2 on t1.key=t2.key where (t1.c_int + t2.c_int == 2) and (t1.c_int > 0 or t2.c_float >= 0) -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -PREHOOK: Input: default@t2 -PREHOOK: Input: default@t2@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: select t1.c_int, t2.c_int from t1 right outer join t2 on t1.key=t2.key where (t1.c_int + t2.c_int == 2) and (t1.c_int > 0 or t2.c_float >= 0) -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -POSTHOOK: Input: default@t2 -POSTHOOK: Input: default@t2@dt=2014 -#### A masked pattern was here #### -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -PREHOOK: query: select t1.c_int, t2.c_int from t1 full outer join t2 on t1.key=t2.key where (t1.c_int + t2.c_int == 2) and (t1.c_int > 0 or t2.c_float >= 0) -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -PREHOOK: Input: default@t2 -PREHOOK: Input: default@t2@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: 
select t1.c_int, t2.c_int from t1 full outer join t2 on t1.key=t2.key where (t1.c_int + t2.c_int == 2) and (t1.c_int > 0 or t2.c_float >= 0) -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -POSTHOOK: Input: default@t2 -POSTHOOK: Input: default@t2@dt=2014 -#### A masked pattern was here #### -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -1 1 -PREHOOK: query: select b, t1.c, t2.p, q, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or t2.q >= 0) -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -PREHOOK: Input: default@t2 -PREHOOK: Input: default@t2@dt=2014 -PREHOOK: Input: default@t3 -#### A masked pattern was here #### -POSTHOOK: query: select b, t1.c, t2.p, q, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or t2.q >= 0) -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -POSTHOOK: Input: default@t2 -POSTHOOK: Input: default@t2@dt=2014 -POSTHOOK: Input: default@t3 -#### A masked pattern was here #### -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 
-1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 
1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -1 1.0 1 1 1 -PREHOOK: query: select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 left outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0) -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -PREHOOK: Input: default@t2 -PREHOOK: Input: default@t2@dt=2014 -PREHOOK: Input: default@t3 -#### A masked pattern was here #### -POSTHOOK: query: select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 left outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0) -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -POSTHOOK: Input: default@t2 -POSTHOOK: Input: default@t2@dt=2014 -POSTHOOK: Input: default@t3 -#### A masked pattern was here #### -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 
-1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 
1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -PREHOOK: query: select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 right outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0) -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -PREHOOK: Input: default@t2 -PREHOOK: Input: default@t2@dt=2014 -PREHOOK: Input: default@t3 -#### A masked pattern was here #### -POSTHOOK: query: select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) 
t1 right outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0) -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -POSTHOOK: Input: default@t2 -POSTHOOK: Input: default@t2@dt=2014 -POSTHOOK: Input: default@t3 -#### A masked pattern was here #### -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 
1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 
-1 1 1 1.0 1
[... repeated deleted rows "1 1 1 1.0 1" elided ...]
-PREHOOK: query: select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 full outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t1@dt=2014
-PREHOOK: Input: default@t2
-PREHOOK: Input: default@t2@dt=2014
-PREHOOK: Input: default@t3
-#### A masked pattern was here ####
-POSTHOOK: query: select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 full outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t1@dt=2014
-POSTHOOK: Input: default@t2
-POSTHOOK: Input: default@t2@dt=2014
-POSTHOOK: Input: default@t3
-#### A masked pattern was here ####
-1 1 1 1.0 1
[... repeated deleted rows "1 1 1 1.0 1" elided ...]
-PREHOOK: query: select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 full outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0)
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t1@dt=2014
-PREHOOK: Input: default@t2
-PREHOOK: Input: default@t2@dt=2014
-PREHOOK: Input: default@t3
-#### A masked pattern was here ####
-POSTHOOK: query: select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 full outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0)
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t1@dt=2014
-POSTHOOK: Input: default@t2
-POSTHOOK: Input: default@t2@dt=2014
-POSTHOOK: Input: default@t3
-#### A masked pattern was here ####
-1 1 1 1.0 1
[... repeated deleted rows "1 1 1 1.0 1" elided ...]
-PREHOOK: query: select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 left outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p left outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0)
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t1@dt=2014
-PREHOOK: Input: default@t2
-PREHOOK: Input: default@t2@dt=2014
-PREHOOK: Input: default@t3
-#### A masked pattern was here ####
-POSTHOOK: query: select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 left outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p left outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0)
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t1@dt=2014
-POSTHOOK: Input: default@t2
-POSTHOOK: Input: default@t2@dt=2014
-POSTHOOK: Input: default@t3
-#### A masked pattern was here ####
-1 1 1 1.0 1
[... repeated deleted rows "1 1 1 1.0 1" elided ...]
-PREHOOK: query: select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 left outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p right outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0)
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t1@dt=2014
-PREHOOK: Input: default@t2
-PREHOOK: Input: default@t2@dt=2014
-PREHOOK: Input: default@t3
-#### A masked pattern was here ####
-POSTHOOK: query: select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 left outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p right outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0)
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t1@dt=2014
-POSTHOOK: Input: default@t2
-POSTHOOK: Input: default@t2@dt=2014
-POSTHOOK: Input: default@t3
-#### A masked pattern was here ####
-1 1 1 1.0 1
[... repeated deleted rows "1 1 1 1.0 1" elided ...]
-PREHOOK: query: select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 left outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p full outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0)
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t1@dt=2014
-PREHOOK: Input: default@t2
-PREHOOK: Input: default@t2@dt=2014
-PREHOOK: Input: default@t3
-#### A masked pattern was here ####
-POSTHOOK: query: select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 left outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p full outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0)
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t1@dt=2014
-POSTHOOK: Input: default@t2
-POSTHOOK: Input: default@t2@dt=2014
-POSTHOOK: Input: default@t3
-#### A masked pattern was here ####
-1 1 1 1.0 1
[... repeated deleted rows "1 1 1 1.0 1" elided ...]
-PREHOOK: query: select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 right outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p right outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0)
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t1@dt=2014
-PREHOOK: Input: default@t2
-PREHOOK: Input: default@t2@dt=2014
-PREHOOK: Input: default@t3
-#### A masked pattern was here ####
-POSTHOOK: query: select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 right outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p right outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0)
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t1@dt=2014
-POSTHOOK: Input: default@t2
-POSTHOOK: Input: default@t2@dt=2014
-POSTHOOK: Input: default@t3
-#### A masked pattern was here ####
-1 1 1 1.0 1
[... repeated deleted rows "1 1 1 1.0 1" elided ...]
-PREHOOK: query: select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 right outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p left outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0)
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t1@dt=2014
-PREHOOK: Input: default@t2
-PREHOOK: Input: default@t2@dt=2014
-PREHOOK: Input: default@t3
-#### A masked pattern was here ####
-POSTHOOK: query: select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 right outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p left outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0)
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t1@dt=2014
-POSTHOOK: Input: default@t2
-POSTHOOK: Input: default@t2@dt=2014
-POSTHOOK: Input: default@t3
-#### A masked pattern was here ####
-1 1 1 1.0 1
[... repeated deleted rows "1 1 1 1.0 1" elided ...]
-PREHOOK: query: select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 right outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p full outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0)
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t1@dt=2014
-PREHOOK: Input: default@t2
-PREHOOK: Input: default@t2@dt=2014
-PREHOOK: Input: default@t3
-#### A masked pattern was here ####
-POSTHOOK: query: select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 right outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p full outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0)
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t1@dt=2014
-POSTHOOK: Input: default@t2
-POSTHOOK: Input: default@t2@dt=2014
-POSTHOOK: Input: default@t3
-#### A masked pattern was here ####
-1 1 1 1.0 1
[... repeated deleted rows "1 1 1 1.0 1" elided ...]
-PREHOOK: query: select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 full outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p full outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0)
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t1@dt=2014
-PREHOOK: Input: default@t2
-PREHOOK: Input: default@t2@dt=2014
-PREHOOK: Input: default@t3
-#### A masked pattern was here ####
-POSTHOOK: query: select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 full outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p full outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0)
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t1@dt=2014
-POSTHOOK: Input: default@t2
-POSTHOOK: Input: default@t2@dt=2014
-POSTHOOK: Input: default@t3
-#### A masked pattern was here ####
-1 1 1 1.0 1
[... repeated deleted rows "1 1 1 1.0 1" elided ...]
1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -PREHOOK: query: select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 full outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p left outer join t3 on t1.a=key where (b + 
t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0) -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -PREHOOK: Input: default@t2 -PREHOOK: Input: default@t2@dt=2014 -PREHOOK: Input: default@t3 -#### A masked pattern was here #### -POSTHOOK: query: select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 full outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p left outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0) -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -POSTHOOK: Input: default@t2 -POSTHOOK: Input: default@t2@dt=2014 -POSTHOOK: Input: default@t3 -#### A masked pattern was here #### -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 
1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 
1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -PREHOOK: query: select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 full outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p right outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0) -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -PREHOOK: Input: default@t2 -PREHOOK: Input: default@t2@dt=2014 -PREHOOK: Input: default@t3 -#### A masked pattern was here #### -POSTHOOK: query: select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 full outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p right outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0) -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -POSTHOOK: Input: default@t2 -POSTHOOK: Input: default@t2@dt=2014 -POSTHOOK: Input: default@t3 -#### A masked pattern was here #### -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 
1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 
1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -1 1 1 1.0 1 -PREHOOK: query: -- 6. Test Select + TS + Join + Fil + GB + GB Having -select * from t1 group by c_int -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: -- 6. 
Test Select + TS + Join + Fil + GB + GB Having -select * from t1 group by c_int -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -NULL -1 -PREHOOK: query: select key, (c_int+1)+2 as x, sum(c_int) from t1 group by c_float, t1.c_int, key -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: select key, (c_int+1)+2 as x, sum(c_int) from t1 group by c_float, t1.c_int, key -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -null NULL NULL - 1 4 2 - 1 4 2 -1 4 12 -1 4 2 -PREHOOK: query: select * from (select key, (c_int+c_float+1+2) as x, sum(c_int) as y from t1 group by c_float, t1.c_int, key) R group by y, x -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: select * from (select key, (c_int+c_float+1+2) as x, sum(c_int) as y from t1 group by c_float, t1.c_int, key) R group by y, x -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -NULL NULL -2 5.0 -12 5.0 -PREHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key order by a) t1 join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key order by q/10 desc, r asc) t2 on t1.a=p join t3 on t1.a=key where (b + t2.q >= 0) and (b > 0 or c_int >= 0) group by t3.c_int, c order by t3.c_int+c desc, c -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -PREHOOK: Input: default@t2 -PREHOOK: Input: default@t2@dt=2014 -PREHOOK: Input: default@t3 -#### A masked pattern was here #### -POSTHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key order by a) t1 join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key order by q/10 desc, r asc) t2 on t1.a=p join t3 on t1.a=key where (b + t2.q >= 0) and (b > 0 or c_int >= 0) group by t3.c_int, c order by t3.c_int+c desc, c -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -POSTHOOK: Input: default@t2 -POSTHOOK: Input: default@t2@dt=2014 -POSTHOOK: Input: default@t3 -#### A masked pattern was here #### -1 12 -1 2 -PREHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key having t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by b % c asc, b desc) t1 left outer join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key having t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0) t2 on t1.a=p left outer join t3 on t1.a=key where (b + t2.q >= 0) and (b > 0 or c_int >= 0) group by t3.c_int, c having t3.c_int > 0 and (c_int >=1 or c >= 1) and (c_int + c) >= 0 order by t3.c_int % c asc, t3.c_int desc -PREHOOK: type: QUERY -PREHOOK: Input: 
default@t1 -PREHOOK: Input: default@t1@dt=2014 -PREHOOK: Input: default@t2 -PREHOOK: Input: default@t2@dt=2014 -PREHOOK: Input: default@t3 -#### A masked pattern was here #### -POSTHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key having t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by b % c asc, b desc) t1 left outer join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key having t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0) t2 on t1.a=p left outer join t3 on t1.a=key where (b + t2.q >= 0) and (b > 0 or c_int >= 0) group by t3.c_int, c having t3.c_int > 0 and (c_int >=1 or c >= 1) and (c_int + c) >= 0 order by t3.c_int % c asc, t3.c_int desc -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -POSTHOOK: Input: default@t2 -POSTHOOK: Input: default@t2@dt=2014 -POSTHOOK: Input: default@t3 -#### A masked pattern was here #### -1 12 -1 2 -PREHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key having t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by b+c, a desc) t1 right outer join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key having t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0) t2 on t1.a=p right outer join t3 on t1.a=key where (b + t2.q >= 2) and (b > 0 or c_int >= 0) group by t3.c_int, c -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -PREHOOK: Input: default@t2 -PREHOOK: Input: default@t2@dt=2014 -PREHOOK: Input: default@t3 -#### A masked pattern was here #### -POSTHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key having t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by b+c, a desc) t1 right outer join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key having t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0) t2 on t1.a=p right outer join t3 on t1.a=key where (b + t2.q >= 2) and (b > 0 or c_int >= 0) group by t3.c_int, c -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -POSTHOOK: Input: default@t2 -POSTHOOK: Input: default@t2@dt=2014 -POSTHOOK: Input: default@t3 -#### A masked pattern was here #### -1 2 -1 12 -PREHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key having t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by c+a desc) t1 full outer join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key having t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by p+q desc, r asc) t2 on t1.a=p full outer join t3 on t1.a=key where (b + t2.q >= 
0) and (b > 0 or c_int >= 0) group by t3.c_int, c having t3.c_int > 0 and (c_int >=1 or c >= 1) and (c_int + c) >= 0 order by t3.c_int -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -PREHOOK: Input: default@t2 -PREHOOK: Input: default@t2@dt=2014 -PREHOOK: Input: default@t3 -#### A masked pattern was here #### -POSTHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key having t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by c+a desc) t1 full outer join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key having t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by p+q desc, r asc) t2 on t1.a=p full outer join t3 on t1.a=key where (b + t2.q >= 0) and (b > 0 or c_int >= 0) group by t3.c_int, c having t3.c_int > 0 and (c_int >=1 or c >= 1) and (c_int + c) >= 0 order by t3.c_int -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -POSTHOOK: Input: default@t2 -POSTHOOK: Input: default@t2@dt=2014 -POSTHOOK: Input: default@t3 -#### A masked pattern was here #### -1 12 -1 2 -PREHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key having t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0) t1 join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key having t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0) t2 on t1.a=p join t3 on t1.a=key where (b + t2.q >= 0) and (b > 0 or c_int >= 0) group by t3.c_int, c -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -PREHOOK: Input: default@t2 -PREHOOK: Input: default@t2@dt=2014 -PREHOOK: Input: default@t3 -#### A masked pattern was here #### -POSTHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key having t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0) t1 join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key having t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0) t2 on t1.a=p join t3 on t1.a=key where (b + t2.q >= 0) and (b > 0 or c_int >= 0) group by t3.c_int, c -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -POSTHOOK: Input: default@t2 -POSTHOOK: Input: default@t2@dt=2014 -POSTHOOK: Input: default@t3 -#### A masked pattern was here #### -1 2 -1 12 -PREHOOK: query: -- 7. Test Select + TS + Join + Fil + GB + GB Having + Limit -select * from t1 group by c_int limit 1 -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: -- 7. 
Test Select + TS + Join + Fil + GB + GB Having + Limit -select * from t1 group by c_int limit 1 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -NULL -PREHOOK: query: select key, (c_int+1)+2 as x, sum(c_int) from t1 group by c_float, t1.c_int, key order by x limit 1 -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: select key, (c_int+1)+2 as x, sum(c_int) from t1 group by c_float, t1.c_int, key order by x limit 1 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -null NULL NULL -PREHOOK: query: select * from (select key, (c_int+c_float+1+2) as x, sum(c_int) as y from t1 group by c_float, t1.c_int, key) R group by y, x order by x,y limit 1 -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: select * from (select key, (c_int+c_float+1+2) as x, sum(c_int) as y from t1 group by c_float, t1.c_int, key) R group by y, x order by x,y limit 1 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -NULL NULL -PREHOOK: query: select key from(select key from (select key from t1 limit 5)t2 limit 5)t3 limit 5 -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: select key from(select key from (select key from t1 limit 5)t2 limit 5)t3 limit 5 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -1 -1 -1 -1 -1 -PREHOOK: query: select key, c_int from(select key, c_int from (select key, c_int from t1 order by c_int limit 5)t1 order by c_int limit 5)t2 order by c_int limit 5 -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: select key, c_int from(select key, c_int from (select key, c_int from t1 order by c_int limit 5)t1 order by c_int limit 5)t2 order by c_int limit 5 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -null NULL -null NULL -1 1 -1 1 -1 1 -PREHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key order by a limit 5) t1 join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key order by q/10 desc, r asc limit 5) t2 on t1.a=p join t3 on t1.a=key where (b + t2.q >= 0) and (b > 0 or c_int >= 0) group by t3.c_int, c order by t3.c_int+c desc, c limit 5 -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -PREHOOK: Input: default@t2 -PREHOOK: Input: default@t2@dt=2014 -PREHOOK: Input: default@t3 -#### A masked pattern was here #### -POSTHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key order by a limit 5) t1 join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key order by q/10 desc, r asc limit 5) t2 on 
t1.a=p join t3 on t1.a=key where (b + t2.q >= 0) and (b > 0 or c_int >= 0) group by t3.c_int, c order by t3.c_int+c desc, c limit 5 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -POSTHOOK: Input: default@t2 -POSTHOOK: Input: default@t2@dt=2014 -POSTHOOK: Input: default@t3 -#### A masked pattern was here #### -1 12 -1 2 -PREHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key having t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by b % c asc, b desc limit 5) t1 left outer join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key having t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 limit 5) t2 on t1.a=p left outer join t3 on t1.a=key where (b + t2.q >= 0) and (b > 0 or c_int >= 0) group by t3.c_int, c having t3.c_int > 0 and (c_int >=1 or c >= 1) and (c_int + c) >= 0 order by t3.c_int % c asc, t3.c_int desc limit 5 -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -PREHOOK: Input: default@t2 -PREHOOK: Input: default@t2@dt=2014 -PREHOOK: Input: default@t3 -#### A masked pattern was here #### -POSTHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key having t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by b % c asc, b desc limit 5) t1 left outer join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key having t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 limit 5) t2 on t1.a=p left outer join t3 on t1.a=key where (b + t2.q >= 0) and (b > 0 or c_int >= 0) group by t3.c_int, c having t3.c_int > 0 and (c_int >=1 or c >= 1) and (c_int + c) >= 0 order by t3.c_int % c asc, t3.c_int desc limit 5 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -POSTHOOK: Input: default@t2 -POSTHOOK: Input: default@t2@dt=2014 -POSTHOOK: Input: default@t3 -#### A masked pattern was here #### -1 12 -1 2 -PREHOOK: query: -- 8. Test UDF/UDAF -select count(*), count(c_int), sum(c_int), avg(c_int), max(c_int), min(c_int) from t1 -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: -- 8. 
Test UDF/UDAF -select count(*), count(c_int), sum(c_int), avg(c_int), max(c_int), min(c_int) from t1 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -20 18 18 1.0 1 1 -PREHOOK: query: select count(*), count(c_int), sum(c_int), avg(c_int), max(c_int), min(c_int), case c_int when 0 then 1 when 1 then 2 else 3 end, sum(case c_int when 0 then 1 when 1 then 2 else 3 end) from t1 group by c_int -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: select count(*), count(c_int), sum(c_int), avg(c_int), max(c_int), min(c_int), case c_int when 0 then 1 when 1 then 2 else 3 end, sum(case c_int when 0 then 1 when 1 then 2 else 3 end) from t1 group by c_int -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -2 0 NULL NULL NULL NULL 3 6 -18 18 18 1.0 1 1 2 36 -PREHOOK: query: select * from (select count(*) as a, count(distinct c_int) as b, sum(c_int) as c, avg(c_int) as d, max(c_int) as e, min(c_int) as f from t1) t1 -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: select * from (select count(*) as a, count(distinct c_int) as b, sum(c_int) as c, avg(c_int) as d, max(c_int) as e, min(c_int) as f from t1) t1 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -20 1 18 1.0 1 1 -PREHOOK: query: select * from (select count(*) as a, count(distinct c_int) as b, sum(c_int) as c, avg(c_int) as d, max(c_int) as e, min(c_int) as f, case c_int when 0 then 1 when 1 then 2 else 3 end as g, sum(case c_int when 0 then 1 when 1 then 2 else 3 end) as h from t1 group by c_int) t1 -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: select * from (select count(*) as a, count(distinct c_int) as b, sum(c_int) as c, avg(c_int) as d, max(c_int) as e, min(c_int) as f, case c_int when 0 then 1 when 1 then 2 else 3 end as g, sum(case c_int when 0 then 1 when 1 then 2 else 3 end) as h from t1 group by c_int) t1 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -2 0 NULL NULL NULL NULL 3 6 -18 1 18 1.0 1 1 2 36 -PREHOOK: query: select f,a,e,b from (select count(*) as a, count(c_int) as b, sum(c_int) as c, avg(c_int) as d, max(c_int) as e, min(c_int) as f from t1) t1 -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: select f,a,e,b from (select count(*) as a, count(c_int) as b, sum(c_int) as c, avg(c_int) as d, max(c_int) as e, min(c_int) as f from t1) t1 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -1 20 1 18 -PREHOOK: query: select f,a,e,b from (select count(*) as a, count(distinct c_int) as b, sum(distinct c_int) as c, avg(distinct c_int) as d, max(distinct c_int) as e, min(distinct c_int) as f from t1) t1 -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: select f,a,e,b from (select count(*) as a, count(distinct c_int) as b, sum(distinct c_int) as c, avg(distinct c_int) as d, max(distinct c_int) as e, 
min(distinct c_int) as f from t1) t1 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -1 20 1 1 -PREHOOK: query: select count(c_int) as a, avg(c_float), key from t1 group by key -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: select count(c_int) as a, avg(c_float), key from t1 group by key -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -2 1.0 1 -2 1.0 1 -12 1.0 1 -2 1.0 1 -0 NULL null -PREHOOK: query: select count(distinct c_int) as a, avg(c_float) from t1 group by c_float -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: select count(distinct c_int) as a, avg(c_float) from t1 group by c_float -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -0 NULL -1 1.0 -PREHOOK: query: select count(distinct c_int) as a, avg(c_float) from t1 group by c_int -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: select count(distinct c_int) as a, avg(c_float) from t1 group by c_int -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -0 NULL -1 1.0 -PREHOOK: query: select count(distinct c_int) as a, avg(c_float) from t1 group by c_float, c_int -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: select count(distinct c_int) as a, avg(c_float) from t1 group by c_float, c_int -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -0 NULL -1 1.0 -PREHOOK: query: -- 9. Test Windowing Functions -select count(c_int) over() from t1 -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: -- 9. 
Test Windowing Functions -select count(c_int) over() from t1 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -18 -18 -18 -18 -18 -18 -18 -18 -18 -18 -18 -18 -18 -18 -18 -18 -18 -18 -18 -18 -PREHOOK: query: select count(c_int) over(), sum(c_float) over(), max(c_int) over(), min(c_int) over(), row_number() over(), rank() over(), dense_rank() over(), percent_rank() over(), lead(c_int, 2, c_int) over(), lag(c_float, 2, c_float) over() from t1 -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: select count(c_int) over(), sum(c_float) over(), max(c_int) over(), min(c_int) over(), row_number() over(), rank() over(), dense_rank() over(), percent_rank() over(), lead(c_int, 2, c_int) over(), lag(c_float, 2, c_float) over() from t1 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -18 18.0 1 1 1 1 1 0.0 1 NULL -18 18.0 1 1 2 1 1 0.0 1 NULL -18 18.0 1 1 3 1 1 0.0 1 NULL -18 18.0 1 1 4 1 1 0.0 1 NULL -18 18.0 1 1 5 1 1 0.0 1 1.0 -18 18.0 1 1 6 1 1 0.0 1 1.0 -18 18.0 1 1 7 1 1 0.0 1 1.0 -18 18.0 1 1 8 1 1 0.0 1 1.0 -18 18.0 1 1 9 1 1 0.0 1 1.0 -18 18.0 1 1 10 1 1 0.0 1 1.0 -18 18.0 1 1 11 1 1 0.0 1 1.0 -18 18.0 1 1 12 1 1 0.0 1 1.0 -18 18.0 1 1 13 1 1 0.0 1 1.0 -18 18.0 1 1 14 1 1 0.0 1 1.0 -18 18.0 1 1 15 1 1 0.0 1 1.0 -18 18.0 1 1 16 1 1 0.0 1 1.0 -18 18.0 1 1 17 1 1 0.0 1 1.0 -18 18.0 1 1 18 1 1 0.0 1 1.0 -18 18.0 1 1 19 1 1 0.0 1 1.0 -18 18.0 1 1 20 1 1 0.0 1 1.0 -PREHOOK: query: select * from (select count(c_int) over(), sum(c_float) over(), max(c_int) over(), min(c_int) over(), row_number() over(), rank() over(), dense_rank() over(), percent_rank() over(), lead(c_int, 2, c_int) over(), lag(c_float, 2, c_float) over() from t1) t1 -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: select * from (select count(c_int) over(), sum(c_float) over(), max(c_int) over(), min(c_int) over(), row_number() over(), rank() over(), dense_rank() over(), percent_rank() over(), lead(c_int, 2, c_int) over(), lag(c_float, 2, c_float) over() from t1) t1 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -18 18.0 1 1 1 1 1 0.0 1 NULL -18 18.0 1 1 2 1 1 0.0 1 NULL -18 18.0 1 1 3 1 1 0.0 1 NULL -18 18.0 1 1 4 1 1 0.0 1 NULL -18 18.0 1 1 5 1 1 0.0 1 1.0 -18 18.0 1 1 6 1 1 0.0 1 1.0 -18 18.0 1 1 7 1 1 0.0 1 1.0 -18 18.0 1 1 8 1 1 0.0 1 1.0 -18 18.0 1 1 9 1 1 0.0 1 1.0 -18 18.0 1 1 10 1 1 0.0 1 1.0 -18 18.0 1 1 11 1 1 0.0 1 1.0 -18 18.0 1 1 12 1 1 0.0 1 1.0 -18 18.0 1 1 13 1 1 0.0 1 1.0 -18 18.0 1 1 14 1 1 0.0 1 1.0 -18 18.0 1 1 15 1 1 0.0 1 1.0 -18 18.0 1 1 16 1 1 0.0 1 1.0 -18 18.0 1 1 17 1 1 0.0 1 1.0 -18 18.0 1 1 18 1 1 0.0 1 1.0 -18 18.0 1 1 19 1 1 0.0 1 1.0 -18 18.0 1 1 20 1 1 0.0 1 1.0 -PREHOOK: query: select x from (select count(c_int) over() as x, sum(c_float) over() from t1) t1 -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: select x from (select count(c_int) over() as x, sum(c_float) over() from t1) t1 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -18 -18 -18 -18 -18 -18 -18 -18 -18 -18 -18 -18 -18 -18 -18 -18 -18 -18 -18 -18 -PREHOOK: query: 
select 1+sum(c_int) over() from t1 -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: select 1+sum(c_int) over() from t1 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -19 -19 -19 -19 -19 -19 -19 -19 -19 -19 -19 -19 -19 -19 -19 -19 -19 -19 -19 -19 -PREHOOK: query: select sum(c_int)+sum(sum(c_int)) over() from t1 -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: select sum(c_int)+sum(sum(c_int)) over() from t1 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -36 -PREHOOK: query: select * from (select max(c_int) over (partition by key order by value Rows UNBOUNDED PRECEDING), min(c_int) over (partition by key order by value rows current row), count(c_int) over(partition by key order by value ROWS 1 PRECEDING), avg(value) over (partition by key order by value Rows between unbounded preceding and unbounded following), sum(value) over (partition by key order by value rows between unbounded preceding and current row), avg(c_float) over (partition by key order by value Rows between 1 preceding and unbounded following), sum(c_float) over (partition by key order by value rows between 1 preceding and current row), max(c_float) over (partition by key order by value rows between 1 preceding and unbounded following), min(c_float) over (partition by key order by value rows between 1 preceding and 1 following) from t1) t1 -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: select * from (select max(c_int) over (partition by key order by value Rows UNBOUNDED PRECEDING), min(c_int) over (partition by key order by value rows current row), count(c_int) over(partition by key order by value ROWS 1 PRECEDING), avg(value) over (partition by key order by value Rows between unbounded preceding and unbounded following), sum(value) over (partition by key order by value rows between unbounded preceding and current row), avg(c_float) over (partition by key order by value Rows between 1 preceding and unbounded following), sum(c_float) over (partition by key order by value rows between 1 preceding and current row), max(c_float) over (partition by key order by value rows between 1 preceding and unbounded following), min(c_float) over (partition by key order by value rows between 1 preceding and 1 following) from t1) t1 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -1 1 1 1.0 1.0 1.0 1.0 1.0 1.0 -1 1 2 1.0 2.0 1.0 2.0 1.0 1.0 -1 1 1 1.0 1.0 1.0 1.0 1.0 1.0 -1 1 2 1.0 2.0 1.0 2.0 1.0 1.0 -1 1 1 1.0 1.0 1.0 1.0 1.0 1.0 -1 1 2 1.0 2.0 1.0 2.0 1.0 1.0 -1 1 2 1.0 3.0 1.0 2.0 1.0 1.0 -1 1 2 1.0 4.0 1.0 2.0 1.0 1.0 -1 1 2 1.0 5.0 1.0 2.0 1.0 1.0 -1 1 2 1.0 6.0 1.0 2.0 1.0 1.0 -1 1 2 1.0 7.0 1.0 2.0 1.0 1.0 -1 1 2 1.0 8.0 1.0 2.0 1.0 1.0 -1 1 2 1.0 9.0 1.0 2.0 1.0 1.0 -1 1 2 1.0 10.0 1.0 2.0 1.0 1.0 -1 1 2 1.0 11.0 1.0 2.0 1.0 1.0 -1 1 2 1.0 12.0 1.0 2.0 1.0 1.0 -1 1 1 1.0 1.0 1.0 1.0 1.0 1.0 -1 1 2 1.0 2.0 1.0 2.0 1.0 1.0 -NULL NULL 0 NULL 0.0 NULL NULL NULL NULL -NULL NULL 0 NULL 0.0 NULL NULL NULL NULL -PREHOOK: query: select i, a, h, b, c, d, e, f, g, a as x, a +1 as y from (select max(c_int) over (partition by key order by value 
range UNBOUNDED PRECEDING) a, min(c_int) over (partition by key order by value range current row) b, count(c_int) over(partition by key order by value range 1 PRECEDING) c, avg(value) over (partition by key order by value range between unbounded preceding and unbounded following) d, sum(value) over (partition by key order by value range between unbounded preceding and current row) e, avg(c_float) over (partition by key order by value range between 1 preceding and unbounded following) f, sum(c_float) over (partition by key order by value range between 1 preceding and current row) g, max(c_float) over (partition by key order by value range between 1 preceding and unbounded following) h, min(c_float) over (partition by key order by value range between 1 preceding and 1 following) i from t1) t1 -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: select i, a, h, b, c, d, e, f, g, a as x, a +1 as y from (select max(c_int) over (partition by key order by value range UNBOUNDED PRECEDING) a, min(c_int) over (partition by key order by value range current row) b, count(c_int) over(partition by key order by value range 1 PRECEDING) c, avg(value) over (partition by key order by value range between unbounded preceding and unbounded following) d, sum(value) over (partition by key order by value range between unbounded preceding and current row) e, avg(c_float) over (partition by key order by value range between 1 preceding and unbounded following) f, sum(c_float) over (partition by key order by value range between 1 preceding and current row) g, max(c_float) over (partition by key order by value range between 1 preceding and unbounded following) h, min(c_float) over (partition by key order by value range between 1 preceding and 1 following) i from t1) t1 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -1.0 1 1.0 1 2 1.0 2.0 1.0 2.0 1 2 -1.0 1 1.0 1 2 1.0 2.0 1.0 2.0 1 2 -1.0 1 1.0 1 2 1.0 2.0 1.0 2.0 1 2 -1.0 1 1.0 1 2 1.0 2.0 1.0 2.0 1 2 -1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2 -1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2 -1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2 -1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2 -1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2 -1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2 -1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2 -1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2 -1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2 -1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2 -1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2 -1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2 -1.0 1 1.0 1 2 1.0 2.0 1.0 2.0 1 2 -1.0 1 1.0 1 2 1.0 2.0 1.0 2.0 1 2 -NULL NULL NULL NULL 0 NULL 0.0 NULL NULL NULL NULL -NULL NULL NULL NULL 0 NULL 0.0 NULL NULL NULL NULL -PREHOOK: query: -- 10. Test views -create view v1 as select c_int, value, c_boolean, dt from t1 -PREHOOK: type: CREATEVIEW -PREHOOK: Input: default@t1 -PREHOOK: Output: database:default -PREHOOK: Output: default@v1 -POSTHOOK: query: -- 10. 
Test views -create view v1 as select c_int, value, c_boolean, dt from t1 -POSTHOOK: type: CREATEVIEW -POSTHOOK: Input: default@t1 -POSTHOOK: Output: database:default -POSTHOOK: Output: default@v1 -PREHOOK: query: create view v2 as select c_int, value from t2 -PREHOOK: type: CREATEVIEW -PREHOOK: Input: default@t2 -PREHOOK: Output: database:default -PREHOOK: Output: default@v2 -POSTHOOK: query: create view v2 as select c_int, value from t2 -POSTHOOK: type: CREATEVIEW -POSTHOOK: Input: default@t2 -POSTHOOK: Output: database:default -POSTHOOK: Output: default@v2 -PREHOOK: query: select value from v1 where c_boolean=false -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -PREHOOK: Input: default@v1 -#### A masked pattern was here #### -POSTHOOK: query: select value from v1 where c_boolean=false -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -POSTHOOK: Input: default@v1 -#### A masked pattern was here #### -1 -1 -PREHOOK: query: select max(c_int) from v1 group by (c_boolean) -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -PREHOOK: Input: default@v1 -#### A masked pattern was here #### -POSTHOOK: query: select max(c_int) from v1 group by (c_boolean) -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -POSTHOOK: Input: default@v1 -#### A masked pattern was here #### -NULL -1 -1 -PREHOOK: query: select count(v1.c_int) from v1 join t2 on v1.c_int = t2.c_int -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -PREHOOK: Input: default@t2 -PREHOOK: Input: default@t2@dt=2014 -PREHOOK: Input: default@v1 -#### A masked pattern was here #### -POSTHOOK: query: select count(v1.c_int) from v1 join t2 on v1.c_int = t2.c_int -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -POSTHOOK: Input: default@t2 -POSTHOOK: Input: default@t2@dt=2014 -POSTHOOK: Input: default@v1 -#### A masked pattern was here #### -234 -PREHOOK: query: select count(v1.c_int) from v1 join v2 on v1.c_int = v2.c_int -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -PREHOOK: Input: default@t2 -PREHOOK: Input: default@t2@dt=2014 -PREHOOK: Input: default@v1 -PREHOOK: Input: default@v2 -#### A masked pattern was here #### -POSTHOOK: query: select count(v1.c_int) from v1 join v2 on v1.c_int = v2.c_int -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -POSTHOOK: Input: default@t2 -POSTHOOK: Input: default@t2@dt=2014 -POSTHOOK: Input: default@v1 -POSTHOOK: Input: default@v2 -#### A masked pattern was here #### -234 -PREHOOK: query: select count(*) from v1 a join v1 b on a.value = b.value -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -PREHOOK: Input: default@v1 -#### A masked pattern was here #### -POSTHOOK: query: select count(*) from v1 a join v1 b on a.value = b.value -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -POSTHOOK: Input: default@v1 -#### A masked pattern was here #### -160 -PREHOOK: query: create view v3 as select v1.value val from v1 join t1 on v1.c_boolean = t1.c_boolean -PREHOOK: type: CREATEVIEW -PREHOOK: Input: default@t1 -PREHOOK: Input: default@v1 -PREHOOK: Output: database:default -PREHOOK: Output: default@v3 -POSTHOOK: query: create view v3 as select v1.value val from v1 join t1 on v1.c_boolean = t1.c_boolean -POSTHOOK: type: CREATEVIEW -POSTHOOK: 
Input: default@t1 -POSTHOOK: Input: default@v1 -POSTHOOK: Output: database:default -POSTHOOK: Output: default@v3 -PREHOOK: query: select count(val) from v3 where val != '1' -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -PREHOOK: Input: default@v1 -PREHOOK: Input: default@v3 -#### A masked pattern was here #### -POSTHOOK: query: select count(val) from v3 where val != '1' -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -POSTHOOK: Input: default@v1 -POSTHOOK: Input: default@v3 -#### A masked pattern was here #### -96 -PREHOOK: query: with q1 as ( select key from t1 where key = '1') -select count(*) from q1 -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: with q1 as ( select key from t1 where key = '1') -select count(*) from q1 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -12 -PREHOOK: query: with q1 as ( select value from v1 where c_boolean = false) -select count(value) from q1 -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -PREHOOK: Input: default@v1 -#### A masked pattern was here #### -POSTHOOK: query: with q1 as ( select value from v1 where c_boolean = false) -select count(value) from q1 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -POSTHOOK: Input: default@v1 -#### A masked pattern was here #### -2 -PREHOOK: query: create view v4 as -with q1 as ( select key,c_int from t1 where key = '1') -select * from q1 -PREHOOK: type: CREATEVIEW -PREHOOK: Input: default@t1 -PREHOOK: Output: database:default -PREHOOK: Output: default@v4 -POSTHOOK: query: create view v4 as -with q1 as ( select key,c_int from t1 where key = '1') -select * from q1 -POSTHOOK: type: CREATEVIEW -POSTHOOK: Input: default@t1 -POSTHOOK: Output: database:default -POSTHOOK: Output: default@v4 -PREHOOK: query: with q1 as ( select c_int from q2 where c_boolean = false), -q2 as ( select c_int,c_boolean from v1 where value = '1') -select sum(c_int) from (select c_int from q1) a -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -PREHOOK: Input: default@v1 -#### A masked pattern was here #### -POSTHOOK: query: with q1 as ( select c_int from q2 where c_boolean = false), -q2 as ( select c_int,c_boolean from v1 where value = '1') -select sum(c_int) from (select c_int from q1) a -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -POSTHOOK: Input: default@v1 -#### A masked pattern was here #### -2 -PREHOOK: query: with q1 as ( select t1.c_int c_int from q2 join t1 where q2.c_int = t1.c_int and t1.dt='2014'), -q2 as ( select c_int,c_boolean from v1 where value = '1' or dt = '14') -select count(*) from q1 join q2 join v4 on q1.c_int = q2.c_int and v4.c_int = q2.c_int -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -PREHOOK: Input: default@v1 -PREHOOK: Input: default@v4 -#### A masked pattern was here #### -POSTHOOK: query: with q1 as ( select t1.c_int c_int from q2 join t1 where q2.c_int = t1.c_int and t1.dt='2014'), -q2 as ( select c_int,c_boolean from v1 where value = '1' or dt = '14') -select count(*) from q1 join q2 join v4 on q1.c_int = q2.c_int and v4.c_int = q2.c_int -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -POSTHOOK: Input: default@v1 -POSTHOOK: 
Input: default@v4 -#### A masked pattern was here #### -31104 -PREHOOK: query: drop view v1 -PREHOOK: type: DROPVIEW -PREHOOK: Input: default@v1 -PREHOOK: Output: default@v1 -POSTHOOK: query: drop view v1 -POSTHOOK: type: DROPVIEW -POSTHOOK: Input: default@v1 -POSTHOOK: Output: default@v1 -PREHOOK: query: drop view v2 -PREHOOK: type: DROPVIEW -PREHOOK: Input: default@v2 -PREHOOK: Output: default@v2 -POSTHOOK: query: drop view v2 -POSTHOOK: type: DROPVIEW -POSTHOOK: Input: default@v2 -POSTHOOK: Output: default@v2 -PREHOOK: query: drop view v3 -PREHOOK: type: DROPVIEW -PREHOOK: Input: default@v3 -PREHOOK: Output: default@v3 -POSTHOOK: query: drop view v3 -POSTHOOK: type: DROPVIEW -POSTHOOK: Input: default@v3 -POSTHOOK: Output: default@v3 -PREHOOK: query: drop view v4 -PREHOOK: type: DROPVIEW -PREHOOK: Input: default@v4 -PREHOOK: Output: default@v4 -POSTHOOK: query: drop view v4 -POSTHOOK: type: DROPVIEW -POSTHOOK: Input: default@v4 -POSTHOOK: Output: default@v4 -PREHOOK: query: -- 11. Union All -select * from t1 union all select * from t2 order by key, c_boolean, value, dt -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -PREHOOK: Input: default@t2 -PREHOOK: Input: default@t2@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: -- 11. Union All -select * from t1 union all select * from t2 order by key, c_boolean, value, dt -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -POSTHOOK: Input: default@t2 -POSTHOOK: Input: default@t2@dt=2014 -#### A masked pattern was here #### -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 false 2014 -1 1 1 1.0 false 2014 -null null NULL NULL NULL 2014 -null null NULL NULL NULL 2014 - 1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 - 1 1 1 1.0 true 2014 -1 1 1 1.0 false 2014 -1 1 1 1.0 false 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -1 1 1 1.0 true 2014 -2 2 2 2.0 true 2014 -2 2 2 2.0 true 2014 -2 2 2 2.0 true 2014 -2 2 2 2.0 true 2014 -2 2 2 2.0 true 2014 -null null NULL NULL NULL 2014 -null null NULL NULL NULL 2014 -PREHOOK: query: select key from (select key, c_int from (select * from t1 union all select * from t2 where t2.key >=0)r1 union all select key, c_int from t3)r2 where key >=0 order by key -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -PREHOOK: Input: default@t2 -PREHOOK: Input: default@t2@dt=2014 -PREHOOK: Input: default@t3 -#### A masked pattern was here #### -POSTHOOK: query: select key from (select key, c_int from (select * from t1 union all select * from t2 where t2.key >=0)r1 union all select key, c_int from t3)r2 where key >=0 order by key -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -POSTHOOK: Input: default@t2 -POSTHOOK: Input: default@t2@dt=2014 -POSTHOOK: Input: default@t3 -#### A masked pattern was here #### - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -2 -2 -2 -2 -2 -2 -2 -2 -3 -3 -3 -PREHOOK: query: select r2.key from (select key, c_int from (select key, c_int from t1 union 
all select key, c_int from t3 )r1 union all select key, c_int from t3)r2 join (select key, c_int from (select * from t1 union all select * from t2 where t2.key >=0)r1 union all select key, c_int from t3)r3 on r2.key=r3.key where r3.key >=0 order by r2.key -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -PREHOOK: Input: default@t2 -PREHOOK: Input: default@t2@dt=2014 -PREHOOK: Input: default@t3 -#### A masked pattern was here #### -POSTHOOK: query: select r2.key from (select key, c_int from (select key, c_int from t1 union all select key, c_int from t3 )r1 union all select key, c_int from t3)r2 join (select key, c_int from (select * from t1 union all select * from t2 where t2.key >=0)r1 union all select key, c_int from t3)r3 on r2.key=r3.key where r3.key >=0 order by r2.key -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -POSTHOOK: Input: default@t2 -POSTHOOK: Input: default@t2@dt=2014 -POSTHOOK: Input: default@t3 -#### A masked pattern was here #### - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -PREHOOK: query: -- 12. 
SemiJoin -select t1.c_int from t1 left semi join t2 on t1.key=t2.key -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -PREHOOK: Input: default@t2 -PREHOOK: Input: default@t2@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: -- 12. SemiJoin -select t1.c_int from t1 left semi join t2 on t1.key=t2.key -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -POSTHOOK: Input: default@t2 -POSTHOOK: Input: default@t2@dt=2014 -#### A masked pattern was here #### -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -NULL -NULL -PREHOOK: query: select t1.c_int from t1 left semi join t2 on t1.key=t2.key where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0) -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -PREHOOK: Input: default@t2 -PREHOOK: Input: default@t2@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: select t1.c_int from t1 left semi join t2 on t1.key=t2.key where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0) -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -POSTHOOK: Input: default@t2 -POSTHOOK: Input: default@t2@dt=2014 -#### A masked pattern was here #### -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -PREHOOK: query: select * from (select c, b, a from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 left semi join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p left semi join t3 on t1.a=key where (b + 1 == 2) and (b > 0 or c >= 0)) R where (b + 1 = 2) and (R.b > 0 or c >= 0) -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -PREHOOK: Input: default@t2 -PREHOOK: Input: default@t2@dt=2014 -PREHOOK: Input: default@t3 -#### A masked pattern was here #### -POSTHOOK: query: select * from (select c, b, a from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 left semi join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p left semi join t3 on t1.a=key where (b + 1 == 2) and (b > 0 or c >= 0)) R where (b + 1 = 2) and (R.b > 0 or c >= 0) -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -POSTHOOK: Input: default@t2 -POSTHOOK: Input: default@t2@dt=2014 -POSTHOOK: Input: default@t3 -#### A masked pattern was here #### -1.0 1 1 -1.0 1 1 -1.0 1 1 -1.0 1 1 -1.0 1 1 -1.0 1 1 -1.0 1 1 -1.0 1 1 -1.0 1 1 -1.0 1 1 -1.0 1 1 -1.0 1 1 -1.0 1 1 -1.0 1 1 -1.0 1 1 -1.0 1 1 -1.0 1 1 -1.0 1 1 -PREHOOK: query: select * from (select t3.c_int, t1.c, b from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 = 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 left semi join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p left outer join t3 on t1.a=key where (b + t3.c_int == 2) and (b > 0 or c_int >= 0)) R where (R.c_int + 1 = 2) and (R.b > 0 or c_int >= 0) -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -PREHOOK: Input: default@t2 -PREHOOK: Input: default@t2@dt=2014 -PREHOOK: Input: default@t3 -#### A masked pattern was here #### -POSTHOOK: query: select * from (select t3.c_int, t1.c, b from 
(select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 = 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 left semi join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p left outer join t3 on t1.a=key where (b + t3.c_int == 2) and (b > 0 or c_int >= 0)) R where (R.c_int + 1 = 2) and (R.b > 0 or c_int >= 0) -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -POSTHOOK: Input: default@t2 -POSTHOOK: Input: default@t2@dt=2014 -POSTHOOK: Input: default@t3 -#### A masked pattern was here #### -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -1 1.0 1 -PREHOOK: query: select * from (select c_int, b, t1.c from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 left semi join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p right outer join t3 on t1.a=key where (b + 1 == 2) and (b > 0 or c_int >= 0)) R where (c + 1 = 2) and (R.b > 0 or c_int >= 0) -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -PREHOOK: Input: default@t2 -PREHOOK: Input: default@t2@dt=2014 -PREHOOK: Input: default@t3 -#### A masked pattern was here #### -POSTHOOK: query: select * from (select c_int, b, t1.c from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 left semi join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p right outer join t3 on t1.a=key where (b + 1 == 2) and (b > 0 or c_int >= 0)) R where (c + 1 = 2) and (R.b > 0 or c_int >= 0) -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -POSTHOOK: Input: default@t2 -POSTHOOK: Input: default@t2@dt=2014 -POSTHOOK: Input: default@t3 -#### A masked pattern was here #### -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -PREHOOK: query: select * from (select c_int, b, t1.c from 
(select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 left semi join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p full outer join t3 on t1.a=key where (b + 1 == 2) and (b > 0 or c_int >= 0)) R where (c + 1 = 2) and (R.b > 0 or c_int >= 0) -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -PREHOOK: Input: default@t2 -PREHOOK: Input: default@t2@dt=2014 -PREHOOK: Input: default@t3 -#### A masked pattern was here #### -POSTHOOK: query: select * from (select c_int, b, t1.c from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 left semi join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p full outer join t3 on t1.a=key where (b + 1 == 2) and (b > 0 or c_int >= 0)) R where (c + 1 = 2) and (R.b > 0 or c_int >= 0) -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -POSTHOOK: Input: default@t2 -POSTHOOK: Input: default@t2@dt=2014 -POSTHOOK: Input: default@t3 -#### A masked pattern was here #### -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -1 1 1.0 -PREHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key having t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by a+b desc, c asc) t1 left semi join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key having t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by q+r/10 desc, p) t2 on t1.a=p left semi join t3 on t1.a=key where (b + 1 >= 0) and (b > 0 or a >= 0) group by a, c having a > 0 and (a >=1 or c >= 1) and (a + c) >= 0 order by c, a -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -PREHOOK: Input: default@t2 -PREHOOK: Input: default@t2@dt=2014 -PREHOOK: Input: default@t3 -#### A masked pattern was here #### -POSTHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key having t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by a+b desc, c asc) t1 left semi join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key having t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by q+r/10 desc, 
p) t2 on t1.a=p left semi join t3 on t1.a=key where (b + 1 >= 0) and (b > 0 or a >= 0) group by a, c having a > 0 and (a >=1 or c >= 1) and (a + c) >= 0 order by c, a -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -POSTHOOK: Input: default@t2 -POSTHOOK: Input: default@t2@dt=2014 -POSTHOOK: Input: default@t3 -#### A masked pattern was here #### - 1 2 - 1 2 -1 2 -1 12 -PREHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key having t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by a+b desc, c asc limit 5) t1 left semi join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key having t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by q+r/10 desc, p limit 5) t2 on t1.a=p left semi join t3 on t1.a=key where (b + 1 >= 0) and (b > 0 or a >= 0) group by a, c having a > 0 and (a >=1 or c >= 1) and (a + c) >= 0 order by c, a -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -PREHOOK: Input: default@t2 -PREHOOK: Input: default@t2@dt=2014 -PREHOOK: Input: default@t3 -#### A masked pattern was here #### -POSTHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key having t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by a+b desc, c asc limit 5) t1 left semi join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key having t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by q+r/10 desc, p limit 5) t2 on t1.a=p left semi join t3 on t1.a=key where (b + 1 >= 0) and (b > 0 or a >= 0) group by a, c having a > 0 and (a >=1 or c >= 1) and (a + c) >= 0 order by c, a -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -POSTHOOK: Input: default@t2 -POSTHOOK: Input: default@t2@dt=2014 -POSTHOOK: Input: default@t3 -#### A masked pattern was here #### - 1 2 - 1 2 -1 2 -1 12 -PREHOOK: query: -- 13. null expr in select list -select null from t3 -PREHOOK: type: QUERY -PREHOOK: Input: default@t3 -#### A masked pattern was here #### -POSTHOOK: query: -- 13. null expr in select list -select null from t3 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t3 -#### A masked pattern was here #### -NULL -NULL -NULL -NULL -NULL -NULL -NULL -NULL -NULL -NULL -NULL -NULL -NULL -NULL -NULL -NULL -NULL -NULL -NULL -NULL -PREHOOK: query: -- 14. unary operator -select key from t1 where c_int = -6 or c_int = +6 -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: -- 14. unary operator -select key from t1 where c_int = -6 or c_int = +6 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -#### A masked pattern was here #### -PREHOOK: query: -- 15. 
query referencing only partition columns -select count(t1.dt) from t1 join t2 on t1.dt = t2.dt where t1.dt = '2014' -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@dt=2014 -PREHOOK: Input: default@t2 -PREHOOK: Input: default@t2@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: -- 15. query referencing only partition columns -select count(t1.dt) from t1 join t2 on t1.dt = t2.dt where t1.dt = '2014' -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@dt=2014 -POSTHOOK: Input: default@t2 -POSTHOOK: Input: default@t2@dt=2014 -#### A masked pattern was here #### -400 -PREHOOK: query: -- 16. SubQueries Not In --- non agg, non corr -select * -from src_cbo -where src_cbo.key not in - ( select key from src_cbo s1 - where s1.key > '2' - ) order by key -PREHOOK: type: QUERY -PREHOOK: Input: default@src_cbo -#### A masked pattern was here #### -POSTHOOK: query: -- 16. SubQueries Not In --- non agg, non corr -select * -from src_cbo -where src_cbo.key not in - ( select key from src_cbo s1 - where s1.key > '2' - ) order by key -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src_cbo -#### A masked pattern was here #### -0 val_0 -0 val_0 -0 val_0 -10 val_10 -100 val_100 -100 val_100 -103 val_103 -103 val_103 -104 val_104 -104 val_104 -105 val_105 -11 val_11 -111 val_111 -113 val_113 -113 val_113 -114 val_114 -116 val_116 -118 val_118 -118 val_118 -119 val_119 -119 val_119 -119 val_119 -12 val_12 -12 val_12 -120 val_120 -120 val_120 -125 val_125 -125 val_125 -126 val_126 -128 val_128 -128 val_128 -128 val_128 -129 val_129 -129 val_129 -131 val_131 -133 val_133 -134 val_134 -134 val_134 -136 val_136 -137 val_137 -137 val_137 -138 val_138 -138 val_138 -138 val_138 -138 val_138 -143 val_143 -145 val_145 -146 val_146 -146 val_146 -149 val_149 -149 val_149 -15 val_15 -15 val_15 -150 val_150 -152 val_152 -152 val_152 -153 val_153 -155 val_155 -156 val_156 -157 val_157 -158 val_158 -160 val_160 -162 val_162 -163 val_163 -164 val_164 -164 val_164 -165 val_165 -165 val_165 -166 val_166 -167 val_167 -167 val_167 -167 val_167 -168 val_168 -169 val_169 -169 val_169 -169 val_169 -169 val_169 -17 val_17 -170 val_170 -172 val_172 -172 val_172 -174 val_174 -174 val_174 -175 val_175 -175 val_175 -176 val_176 -176 val_176 -177 val_177 -178 val_178 -179 val_179 -179 val_179 -18 val_18 -18 val_18 -180 val_180 -181 val_181 -183 val_183 -186 val_186 -187 val_187 -187 val_187 -187 val_187 -189 val_189 -19 val_19 -190 val_190 -191 val_191 -191 val_191 -192 val_192 -193 val_193 -193 val_193 -193 val_193 -194 val_194 -195 val_195 -195 val_195 -196 val_196 -197 val_197 -197 val_197 -199 val_199 -199 val_199 -199 val_199 -2 val_2 -PREHOOK: query: -- non agg, corr -select p_mfgr, b.p_name, p_size -from part b -where b.p_name not in - (select p_name - from (select p_mfgr, p_name, p_size as r from part) a - where r < 10 and b.p_mfgr = a.p_mfgr - ) -PREHOOK: type: QUERY -PREHOOK: Input: default@part -#### A masked pattern was here #### -POSTHOOK: query: -- non agg, corr -select p_mfgr, b.p_name, p_size -from part b -where b.p_name not in - (select p_name - from (select p_mfgr, p_name, p_size as r from part) a - where r < 10 and b.p_mfgr = a.p_mfgr - ) -POSTHOOK: type: QUERY -POSTHOOK: Input: default@part -#### A masked pattern was here #### -Manufacturer#5 almond antique blue firebrick mint 31 -Manufacturer#3 almond antique chartreuse khaki white 17 -Manufacturer#1 almond antique chartreuse lavender yellow 34 -Manufacturer#3 almond antique forest lavender goldenrod 
14 -Manufacturer#4 almond antique gainsboro frosted violet 10 -Manufacturer#3 almond antique metallic orange dim 19 -Manufacturer#3 almond antique olive coral navajo 45 -Manufacturer#2 almond antique violet chocolate turquoise 14 -Manufacturer#4 almond antique violet mint lemon 39 -Manufacturer#2 almond antique violet turquoise frosted 40 -Manufacturer#1 almond aquamarine burnished black steel 28 -Manufacturer#5 almond aquamarine dodger light gainsboro 46 -Manufacturer#4 almond aquamarine floral ivory bisque 27 -Manufacturer#1 almond aquamarine pink moccasin thistle 42 -Manufacturer#2 almond aquamarine rose maroon antique 25 -Manufacturer#2 almond aquamarine sandy cyan gainsboro 18 -Manufacturer#4 almond azure aquamarine papaya violet 12 -Manufacturer#5 almond azure blanched chiffon midnight 23 -PREHOOK: query: -- agg, non corr -select p_name, p_size -from -part where part.p_size not in - (select avg(p_size) - from (select p_size from part) a - where p_size < 10 - ) order by p_name -PREHOOK: type: QUERY -PREHOOK: Input: default@part -#### A masked pattern was here #### -POSTHOOK: query: -- agg, non corr -select p_name, p_size -from -part where part.p_size not in - (select avg(p_size) - from (select p_size from part) a - where p_size < 10 - ) order by p_name -POSTHOOK: type: QUERY -POSTHOOK: Input: default@part -#### A masked pattern was here #### -almond antique blue firebrick mint 31 -almond antique burnished rose metallic 2 -almond antique burnished rose metallic 2 -almond antique chartreuse khaki white 17 -almond antique chartreuse lavender yellow 34 -almond antique forest lavender goldenrod 14 -almond antique gainsboro frosted violet 10 -almond antique medium spring khaki 6 -almond antique metallic orange dim 19 -almond antique misty red olive 1 -almond antique olive coral navajo 45 -almond antique salmon chartreuse burlywood 6 -almond antique sky peru orange 2 -almond antique violet chocolate turquoise 14 -almond antique violet mint lemon 39 -almond antique violet turquoise frosted 40 -almond aquamarine burnished black steel 28 -almond aquamarine dodger light gainsboro 46 -almond aquamarine floral ivory bisque 27 -almond aquamarine midnight light salmon 2 -almond aquamarine pink moccasin thistle 42 -almond aquamarine rose maroon antique 25 -almond aquamarine sandy cyan gainsboro 18 -almond aquamarine yellow dodger mint 7 -almond azure aquamarine papaya violet 12 -almond azure blanched chiffon midnight 23 -PREHOOK: query: -- agg, corr -select p_mfgr, p_name, p_size -from part b where b.p_size not in - (select min(p_size) - from (select p_mfgr, p_size from part) a - where p_size < 10 and b.p_mfgr = a.p_mfgr - ) order by p_name -PREHOOK: type: QUERY -PREHOOK: Input: default@part -#### A masked pattern was here #### -POSTHOOK: query: -- agg, corr -select p_mfgr, p_name, p_size -from part b where b.p_size not in - (select min(p_size) - from (select p_mfgr, p_size from part) a - where p_size < 10 and b.p_mfgr = a.p_mfgr - ) order by p_name -POSTHOOK: type: QUERY -POSTHOOK: Input: default@part -#### A masked pattern was here #### -Manufacturer#5 almond antique blue firebrick mint 31 -Manufacturer#3 almond antique chartreuse khaki white 17 -Manufacturer#1 almond antique chartreuse lavender yellow 34 -Manufacturer#3 almond antique forest lavender goldenrod 14 -Manufacturer#4 almond antique gainsboro frosted violet 10 -Manufacturer#5 almond antique medium spring khaki 6 -Manufacturer#3 almond antique metallic orange dim 19 -Manufacturer#3 almond antique olive coral navajo 45 -Manufacturer#1 
almond antique salmon chartreuse burlywood 6 -Manufacturer#2 almond antique violet chocolate turquoise 14 -Manufacturer#4 almond antique violet mint lemon 39 -Manufacturer#2 almond antique violet turquoise frosted 40 -Manufacturer#1 almond aquamarine burnished black steel 28 -Manufacturer#5 almond aquamarine dodger light gainsboro 46 -Manufacturer#4 almond aquamarine floral ivory bisque 27 -Manufacturer#1 almond aquamarine pink moccasin thistle 42 -Manufacturer#2 almond aquamarine rose maroon antique 25 -Manufacturer#2 almond aquamarine sandy cyan gainsboro 18 -Manufacturer#4 almond azure aquamarine papaya violet 12 -Manufacturer#5 almond azure blanched chiffon midnight 23 -PREHOOK: query: -- non agg, non corr, Group By in Parent Query -select li.l_partkey, count(*) -from lineitem li -where li.l_linenumber = 1 and - li.l_orderkey not in (select l_orderkey from lineitem where l_shipmode = 'AIR') -group by li.l_partkey -PREHOOK: type: QUERY -PREHOOK: Input: default@lineitem -#### A masked pattern was here #### -POSTHOOK: query: -- non agg, non corr, Group By in Parent Query -select li.l_partkey, count(*) -from lineitem li -where li.l_linenumber = 1 and - li.l_orderkey not in (select l_orderkey from lineitem where l_shipmode = 'AIR') -group by li.l_partkey -POSTHOOK: type: QUERY -POSTHOOK: Input: default@lineitem -#### A masked pattern was here #### -450 1 -7068 1 -21636 1 -22630 1 -59694 1 -61931 1 -85951 1 -88035 1 -88362 1 -106170 1 -119477 1 -119767 1 -123076 1 -139636 1 -175839 1 -182052 1 -PREHOOK: query: -- add null check test from sq_notin.q once HIVE-7721 resolved. - --- non agg, corr, having -select b.p_mfgr, min(p_retailprice) -from part b -group by b.p_mfgr -having b.p_mfgr not in - (select p_mfgr - from (select p_mfgr, min(p_retailprice) l, max(p_retailprice) r, avg(p_retailprice) a from part group by p_mfgr) a - where min(p_retailprice) = l and r - l > 600 - ) - order by b.p_mfgr -PREHOOK: type: QUERY -PREHOOK: Input: default@part -#### A masked pattern was here #### -POSTHOOK: query: -- add null check test from sq_notin.q once HIVE-7721 resolved. - --- non agg, corr, having -select b.p_mfgr, min(p_retailprice) -from part b -group by b.p_mfgr -having b.p_mfgr not in - (select p_mfgr - from (select p_mfgr, min(p_retailprice) l, max(p_retailprice) r, avg(p_retailprice) a from part group by p_mfgr) a - where min(p_retailprice) = l and r - l > 600 - ) - order by b.p_mfgr -POSTHOOK: type: QUERY -POSTHOOK: Input: default@part -#### A masked pattern was here #### -Manufacturer#1 1173.15 -Manufacturer#2 1690.68 -PREHOOK: query: -- agg, non corr, having -select b.p_mfgr, min(p_retailprice) -from part b -group by b.p_mfgr -having b.p_mfgr not in - (select p_mfgr - from part a - group by p_mfgr - having max(p_retailprice) - min(p_retailprice) > 600 - ) - order by b.p_mfgr -PREHOOK: type: QUERY -PREHOOK: Input: default@part -#### A masked pattern was here #### -POSTHOOK: query: -- agg, non corr, having -select b.p_mfgr, min(p_retailprice) -from part b -group by b.p_mfgr -having b.p_mfgr not in - (select p_mfgr - from part a - group by p_mfgr - having max(p_retailprice) - min(p_retailprice) > 600 - ) - order by b.p_mfgr -POSTHOOK: type: QUERY -POSTHOOK: Input: default@part -#### A masked pattern was here #### -Manufacturer#1 1173.15 -Manufacturer#2 1690.68 -PREHOOK: query: -- 17. 
SubQueries In --- non agg, non corr -select * -from src_cbo -where src_cbo.key in (select key from src_cbo s1 where s1.key > '9') -PREHOOK: type: QUERY -PREHOOK: Input: default@src_cbo -#### A masked pattern was here #### -POSTHOOK: query: -- 17. SubQueries In --- non agg, non corr -select * -from src_cbo -where src_cbo.key in (select key from src_cbo s1 where s1.key > '9') -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src_cbo -#### A masked pattern was here #### -90 val_90 -90 val_90 -90 val_90 -92 val_92 -95 val_95 -95 val_95 -96 val_96 -97 val_97 -97 val_97 -98 val_98 -98 val_98 -PREHOOK: query: -- agg, corr --- add back once rank issue fixed for cbo - --- distinct, corr -select * -from src_cbo b -where b.key in - (select distinct a.key - from src_cbo a - where b.value = a.value and a.key > '9' - ) -PREHOOK: type: QUERY -PREHOOK: Input: default@src_cbo -#### A masked pattern was here #### -POSTHOOK: query: -- agg, corr --- add back once rank issue fixed for cbo - --- distinct, corr -select * -from src_cbo b -where b.key in - (select distinct a.key - from src_cbo a - where b.value = a.value and a.key > '9' - ) -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src_cbo -#### A masked pattern was here #### -90 val_90 -90 val_90 -90 val_90 -92 val_92 -95 val_95 -95 val_95 -96 val_96 -97 val_97 -97 val_97 -98 val_98 -98 val_98 -PREHOOK: query: -- non agg, corr, with join in Parent Query -select p.p_partkey, li.l_suppkey -from (select distinct l_partkey as p_partkey from lineitem) p join lineitem li on p.p_partkey = li.l_partkey -where li.l_linenumber = 1 and - li.l_orderkey in (select l_orderkey from lineitem where l_shipmode = 'AIR' and l_linenumber = li.l_linenumber) -PREHOOK: type: QUERY -PREHOOK: Input: default@lineitem -#### A masked pattern was here #### -POSTHOOK: query: -- non agg, corr, with join in Parent Query -select p.p_partkey, li.l_suppkey -from (select distinct l_partkey as p_partkey from lineitem) p join lineitem li on p.p_partkey = li.l_partkey -where li.l_linenumber = 1 and - li.l_orderkey in (select l_orderkey from lineitem where l_shipmode = 'AIR' and l_linenumber = li.l_linenumber) -POSTHOOK: type: QUERY -POSTHOOK: Input: default@lineitem -#### A masked pattern was here #### -4297 1798 -108570 8571 -PREHOOK: query: -- where and having --- Plan is: --- Stage 1: b semijoin sq1:src_cbo (subquery in where) --- Stage 2: group by Stage 1 o/p --- Stage 5: group by on sq2:src_cbo (subquery in having) --- Stage 6: Stage 2 o/p semijoin Stage 5 -select key, value, count(*) -from src_cbo b -where b.key in (select key from src_cbo where src_cbo.key > '8') -group by key, value -having count(*) in (select count(*) from src_cbo s1 where s1.key > '9' group by s1.key ) -PREHOOK: type: QUERY -PREHOOK: Input: default@src_cbo -#### A masked pattern was here #### -POSTHOOK: query: -- where and having --- Plan is: --- Stage 1: b semijoin sq1:src_cbo (subquery in where) --- Stage 2: group by Stage 1 o/p --- Stage 5: group by on sq2:src_cbo (subquery in having) --- Stage 6: Stage 2 o/p semijoin Stage 5 -select key, value, count(*) -from src_cbo b -where b.key in (select key from src_cbo where src_cbo.key > '8') -group by key, value -having count(*) in (select count(*) from src_cbo s1 where s1.key > '9' group by s1.key ) -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src_cbo -#### A masked pattern was here #### -80 val_80 1 -96 val_96 1 -92 val_92 1 -9 val_9 1 -87 val_87 1 -86 val_86 1 -85 val_85 1 -82 val_82 1 -84 val_84 2 -95 val_95 2 -83 val_83 2 -98 val_98 2 -97 val_97 2 -90 val_90 3 
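The plan comments embedded in the query above spell out how Hive executes IN-subqueries: each subquery becomes a left semijoin stage (the WHERE-clause subquery joins against the base table; the HAVING-clause subquery joins against the grouped output of that stage). As a minimal HiveQL sketch of the WHERE-clause rewrite — an illustration only, not part of the patch, assuming just the src_cbo(key, value) fixture used throughout these tests — the sq1 stage named in the plan is roughly:

select b.key, b.value, count(*)
from src_cbo b
left semi join (select key from src_cbo where key > '8') sq1
  on b.key = sq1.key
group by b.key, b.value;

This is equivalent to the "b.key in (select key ...)" predicate in the golden query: a left semi join emits each left-side row at most once when a matching key exists, so the aggregate counts match the IN form. The HAVING-clause subquery is handled the same way, semijoining the grouped result of this stage against the second grouped subquery.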
-PREHOOK: query: -- non agg, non corr, windowing -select p_mfgr, p_name, avg(p_size) -from part -group by p_mfgr, p_name -having p_name in - (select first_value(p_name) over(partition by p_mfgr order by p_size) from part) -PREHOOK: type: QUERY -PREHOOK: Input: default@part -#### A masked pattern was here #### -POSTHOOK: query: -- non agg, non corr, windowing -select p_mfgr, p_name, avg(p_size) -from part -group by p_mfgr, p_name -having p_name in - (select first_value(p_name) over(partition by p_mfgr order by p_size) from part) -POSTHOOK: type: QUERY -POSTHOOK: Input: default@part -#### A masked pattern was here #### -Manufacturer#1 almond antique burnished rose metallic 2.0 -Manufacturer#3 almond antique misty red olive 1.0 -Manufacturer#5 almond antique sky peru orange 2.0 -Manufacturer#2 almond aquamarine midnight light salmon 2.0 -Manufacturer#4 almond aquamarine yellow dodger mint 7.0 -PREHOOK: query: -- 18. SubQueries Not Exists --- distinct, corr -select * -from src_cbo b -where not exists - (select distinct a.key - from src_cbo a - where b.value = a.value and a.value > 'val_2' - ) -PREHOOK: type: QUERY -PREHOOK: Input: default@src_cbo -#### A masked pattern was here #### -POSTHOOK: query: -- 18. SubQueries Not Exists --- distinct, corr -select * -from src_cbo b -where not exists - (select distinct a.key - from src_cbo a - where b.value = a.value and a.value > 'val_2' - ) -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src_cbo -#### A masked pattern was here #### -0 val_0 -0 val_0 -0 val_0 -10 val_10 -100 val_100 -100 val_100 -103 val_103 -103 val_103 -104 val_104 -104 val_104 -105 val_105 -11 val_11 -111 val_111 -113 val_113 -113 val_113 -114 val_114 -116 val_116 -118 val_118 -118 val_118 -119 val_119 -119 val_119 -119 val_119 -12 val_12 -12 val_12 -120 val_120 -120 val_120 -125 val_125 -125 val_125 -126 val_126 -128 val_128 -128 val_128 -128 val_128 -129 val_129 -129 val_129 -131 val_131 -133 val_133 -134 val_134 -134 val_134 -136 val_136 -137 val_137 -137 val_137 -138 val_138 -138 val_138 -138 val_138 -138 val_138 -143 val_143 -145 val_145 -146 val_146 -146 val_146 -149 val_149 -149 val_149 -15 val_15 -15 val_15 -150 val_150 -152 val_152 -152 val_152 -153 val_153 -155 val_155 -156 val_156 -157 val_157 -158 val_158 -160 val_160 -162 val_162 -163 val_163 -164 val_164 -164 val_164 -165 val_165 -165 val_165 -166 val_166 -167 val_167 -167 val_167 -167 val_167 -168 val_168 -169 val_169 -169 val_169 -169 val_169 -169 val_169 -17 val_17 -170 val_170 -172 val_172 -172 val_172 -174 val_174 -174 val_174 -175 val_175 -175 val_175 -176 val_176 -176 val_176 -177 val_177 -178 val_178 -179 val_179 -179 val_179 -18 val_18 -18 val_18 -180 val_180 -181 val_181 -183 val_183 -186 val_186 -187 val_187 -187 val_187 -187 val_187 -189 val_189 -19 val_19 -190 val_190 -191 val_191 -191 val_191 -192 val_192 -193 val_193 -193 val_193 -193 val_193 -194 val_194 -195 val_195 -195 val_195 -196 val_196 -197 val_197 -197 val_197 -199 val_199 -199 val_199 -199 val_199 -2 val_2 -PREHOOK: query: -- no agg, corr, having -select * -from src_cbo b -group by key, value -having not exists - (select a.key - from src_cbo a - where b.value = a.value and a.key = b.key and a.value > 'val_12' - ) -PREHOOK: type: QUERY -PREHOOK: Input: default@src_cbo -#### A masked pattern was here #### -POSTHOOK: query: -- no agg, corr, having -select * -from src_cbo b -group by key, value -having not exists - (select a.key - from src_cbo a - where b.value = a.value and a.key = b.key and a.value > 'val_12' - ) -POSTHOOK: type: QUERY 
-POSTHOOK: Input: default@src_cbo -#### A masked pattern was here #### -0 val_0 -10 val_10 -100 val_100 -103 val_103 -104 val_104 -105 val_105 -11 val_11 -111 val_111 -113 val_113 -114 val_114 -116 val_116 -118 val_118 -119 val_119 -12 val_12 -PREHOOK: query: -- 19. SubQueries Exists --- view test -create view cv1 as -select * -from src_cbo b -where exists - (select a.key - from src_cbo a - where b.value = a.value and a.key = b.key and a.value > 'val_9') -PREHOOK: type: CREATEVIEW -PREHOOK: Input: default@src_cbo -PREHOOK: Output: database:default -PREHOOK: Output: default@cv1 -POSTHOOK: query: -- 19. SubQueries Exists --- view test -create view cv1 as -select * -from src_cbo b -where exists - (select a.key - from src_cbo a - where b.value = a.value and a.key = b.key and a.value > 'val_9') -POSTHOOK: type: CREATEVIEW -POSTHOOK: Input: default@src_cbo -POSTHOOK: Output: database:default -POSTHOOK: Output: default@cv1 -PREHOOK: query: select * from cv1 -PREHOOK: type: QUERY -PREHOOK: Input: default@cv1 -PREHOOK: Input: default@src_cbo -#### A masked pattern was here #### -POSTHOOK: query: select * from cv1 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@cv1 -POSTHOOK: Input: default@src_cbo -#### A masked pattern was here #### -90 val_90 -90 val_90 -90 val_90 -92 val_92 -95 val_95 -95 val_95 -96 val_96 -97 val_97 -97 val_97 -98 val_98 -98 val_98 -PREHOOK: query: -- sq in from -select * -from (select * - from src_cbo b - where exists - (select a.key - from src_cbo a - where b.value = a.value and a.key = b.key and a.value > 'val_9') - ) a -PREHOOK: type: QUERY -PREHOOK: Input: default@src_cbo -#### A masked pattern was here #### -POSTHOOK: query: -- sq in from -select * -from (select * - from src_cbo b - where exists - (select a.key - from src_cbo a - where b.value = a.value and a.key = b.key and a.value > 'val_9') - ) a -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src_cbo -#### A masked pattern was here #### -90 val_90 -90 val_90 -90 val_90 -92 val_92 -95 val_95 -95 val_95 -96 val_96 -97 val_97 -97 val_97 -98 val_98 -98 val_98 -PREHOOK: query: -- sq in from, having -select * -from (select b.key, count(*) - from src_cbo b - group by b.key - having exists - (select a.key - from src_cbo a - where a.key = b.key and a.value > 'val_9' - ) -) a -PREHOOK: type: QUERY -PREHOOK: Input: default@src_cbo -#### A masked pattern was here #### -POSTHOOK: query: -- sq in from, having -select * -from (select b.key, count(*) - from src_cbo b - group by b.key - having exists - (select a.key - from src_cbo a - where a.key = b.key and a.value > 'val_9' - ) -) a -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src_cbo -#### A masked pattern was here #### -90 3 -92 1 -95 2 -96 1 -97 2 -98 2 -PREHOOK: query: -- 20. Test get stats with empty partition list -select t1.value from t1 join t2 on t1.key = t2.key where t1.dt = '10' and t1.c_boolean = true -PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t2 -PREHOOK: Input: default@t2@dt=2014 -#### A masked pattern was here #### -POSTHOOK: query: -- 20. Test get stats with empty partition list -select t1.value from t1 join t2 on t1.key = t2.key where t1.dt = '10' and t1.c_boolean = true -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t2 -POSTHOOK: Input: default@t2@dt=2014 -#### A masked pattern was here #### -PREHOOK: query: -- 21. 
Test groupby is empty and there is no other cols in aggr -select unionsrc.key FROM (select 'tst1' as key, count(1) as value from src) unionsrc -PREHOOK: type: QUERY -PREHOOK: Input: default@src -#### A masked pattern was here #### -POSTHOOK: query: -- 21. Test groupby is empty and there is no other cols in aggr -select unionsrc.key FROM (select 'tst1' as key, count(1) as value from src) unionsrc -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src -#### A masked pattern was here #### -tst1 -PREHOOK: query: select unionsrc.key, unionsrc.value FROM (select 'tst1' as key, count(1) as value from src) unionsrc -PREHOOK: type: QUERY -PREHOOK: Input: default@src -#### A masked pattern was here #### -POSTHOOK: query: select unionsrc.key, unionsrc.value FROM (select 'tst1' as key, count(1) as value from src) unionsrc -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src -#### A masked pattern was here #### -tst1 500 -PREHOOK: query: select unionsrc.key FROM (select 'max' as key, max(c_int) as value from t3 s1 - UNION ALL - select 'min' as key, min(c_int) as value from t3 s2 - UNION ALL - select 'avg' as key, avg(c_int) as value from t3 s3) unionsrc order by unionsrc.key -PREHOOK: type: QUERY -PREHOOK: Input: default@t3 -#### A masked pattern was here #### -POSTHOOK: query: select unionsrc.key FROM (select 'max' as key, max(c_int) as value from t3 s1 - UNION ALL - select 'min' as key, min(c_int) as value from t3 s2 - UNION ALL - select 'avg' as key, avg(c_int) as value from t3 s3) unionsrc order by unionsrc.key -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t3 -#### A masked pattern was here #### -avg -max -min -PREHOOK: query: select unionsrc.key, unionsrc.value FROM (select 'max' as key, max(c_int) as value from t3 s1 - UNION ALL - select 'min' as key, min(c_int) as value from t3 s2 - UNION ALL - select 'avg' as key, avg(c_int) as value from t3 s3) unionsrc order by unionsrc.key -PREHOOK: type: QUERY -PREHOOK: Input: default@t3 -#### A masked pattern was here #### -POSTHOOK: query: select unionsrc.key, unionsrc.value FROM (select 'max' as key, max(c_int) as value from t3 s1 - UNION ALL - select 'min' as key, min(c_int) as value from t3 s2 - UNION ALL - select 'avg' as key, avg(c_int) as value from t3 s3) unionsrc order by unionsrc.key -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t3 -#### A masked pattern was here #### -avg 1.5 -max 3.0 -min 1.0 -PREHOOK: query: select unionsrc.key, count(1) FROM (select 'max' as key, max(c_int) as value from t3 s1 - UNION ALL - select 'min' as key, min(c_int) as value from t3 s2 - UNION ALL - select 'avg' as key, avg(c_int) as value from t3 s3) unionsrc group by unionsrc.key order by unionsrc.key -PREHOOK: type: QUERY -PREHOOK: Input: default@t3 -#### A masked pattern was here #### -POSTHOOK: query: select unionsrc.key, count(1) FROM (select 'max' as key, max(c_int) as value from t3 s1 - UNION ALL - select 'min' as key, min(c_int) as value from t3 s2 - UNION ALL - select 'avg' as key, avg(c_int) as value from t3 s3) unionsrc group by unionsrc.key order by unionsrc.key -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t3 -#### A masked pattern was here #### -avg 1 -max 1 -min 1 -PREHOOK: query: -- Windowing -select *, rank() over(partition by key order by value) as rr from src1 -PREHOOK: type: QUERY -PREHOOK: Input: default@src1 -#### A masked pattern was here #### -POSTHOOK: query: -- Windowing -select *, rank() over(partition by key order by value) as rr from src1 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src1 -#### A masked pattern was here #### - 1 - 
1 - 1 - 1 - val_165 5 - val_193 6 - val_265 7 - val_27 8 - val_409 9 - val_484 10 -128 1 -146 val_146 1 -150 val_150 1 -213 val_213 1 -224 1 -238 val_238 1 -255 val_255 1 -273 val_273 1 -278 val_278 1 -311 val_311 1 -369 1 -401 val_401 1 -406 val_406 1 -66 val_66 1 -98 val_98 1 -PREHOOK: query: select *, rank() over(partition by key order by value) from src1 -PREHOOK: type: QUERY -PREHOOK: Input: default@src1 -#### A masked pattern was here #### -POSTHOOK: query: select *, rank() over(partition by key order by value) from src1 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src1 -#### A masked pattern was here #### - 1 - 1 - 1 - 1 - val_165 5 - val_193 6 - val_265 7 - val_27 8 - val_409 9 - val_484 10 -128 1 -146 val_146 1 -150 val_150 1 -213 val_213 1 -224 1 -238 val_238 1 -255 val_255 1 -273 val_273 1 -278 val_278 1 -311 val_311 1 -369 1 -401 val_401 1 -406 val_406 1 -66 val_66 1 -98 val_98 1 Index: ql/src/test/results/clientpositive/tez/cbo_gby.q.out =================================================================== --- ql/src/test/results/clientpositive/tez/cbo_gby.q.out (revision 0) +++ ql/src/test/results/clientpositive/tez/cbo_gby.q.out (working copy) @@ -0,0 +1,132 @@ +PREHOOK: query: -- 6. Test Select + TS + Join + Fil + GB + GB Having +select * from cbo_t1 group by c_int +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: -- 6. Test Select + TS + Join + Fil + GB + GB Having +select * from cbo_t1 group by c_int +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +NULL +1 +PREHOOK: query: select key, (c_int+1)+2 as x, sum(c_int) from cbo_t1 group by c_float, cbo_t1.c_int, key +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select key, (c_int+1)+2 as x, sum(c_int) from cbo_t1 group by c_float, cbo_t1.c_int, key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +null NULL NULL + 1 4 2 + 1 4 2 +1 4 12 +1 4 2 +PREHOOK: query: select * from (select key, (c_int+c_float+1+2) as x, sum(c_int) as y from cbo_t1 group by c_float, cbo_t1.c_int, key) R group by y, x +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select key, (c_int+c_float+1+2) as x, sum(c_int) as y from cbo_t1 group by c_float, cbo_t1.c_int, key) R group by y, x +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +NULL NULL +2 5.0 +12 5.0 +PREHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0) group by c_float, cbo_t1.c_int, key order by a) cbo_t1 join (select key as p, c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0) group by c_float, cbo_t2.c_int, key order by q/10 desc, r asc) cbo_t2 on cbo_t1.a=p join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q >= 0) and (b > 0 or c_int >= 0) group by cbo_t3.c_int, c order by cbo_t3.c_int+c desc, c +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +PREHOOK: Input: default@cbo_t2 +PREHOOK: Input: default@cbo_t2@dt=2014 
+PREHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0) group by c_float, cbo_t1.c_int, key order by a) cbo_t1 join (select key as p, c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0) group by c_float, cbo_t2.c_int, key order by q/10 desc, r asc) cbo_t2 on cbo_t1.a=p join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q >= 0) and (b > 0 or c_int >= 0) group by cbo_t3.c_int, c order by cbo_t3.c_int+c desc, c +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +POSTHOOK: Input: default@cbo_t2 +POSTHOOK: Input: default@cbo_t2@dt=2014 +POSTHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### +1 12 +1 2 +PREHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0) group by c_float, cbo_t1.c_int, key having cbo_t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by b % c asc, b desc) cbo_t1 left outer join (select key as p, c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0) group by c_float, cbo_t2.c_int, key having cbo_t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0) cbo_t2 on cbo_t1.a=p left outer join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q >= 0) and (b > 0 or c_int >= 0) group by cbo_t3.c_int, c having cbo_t3.c_int > 0 and (c_int >=1 or c >= 1) and (c_int + c) >= 0 order by cbo_t3.c_int % c asc, cbo_t3.c_int desc +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +PREHOOK: Input: default@cbo_t2 +PREHOOK: Input: default@cbo_t2@dt=2014 +PREHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0) group by c_float, cbo_t1.c_int, key having cbo_t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by b % c asc, b desc) cbo_t1 left outer join (select key as p, c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0) group by c_float, cbo_t2.c_int, key having cbo_t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0) cbo_t2 on cbo_t1.a=p left outer join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q >= 0) and (b > 0 or c_int >= 0) group by cbo_t3.c_int, c having cbo_t3.c_int > 0 and (c_int >=1 or c >= 1) and (c_int + c) >= 0 order by cbo_t3.c_int % c asc, cbo_t3.c_int desc +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +POSTHOOK: Input: default@cbo_t2 +POSTHOOK: Input: default@cbo_t2@dt=2014 +POSTHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### +1 12 +1 2 +PREHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0) group by c_float, cbo_t1.c_int, key having cbo_t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by b+c, a desc) cbo_t1 right outer join (select key as p, c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or 
cbo_t2.c_float >= 0) group by c_float, cbo_t2.c_int, key having cbo_t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0) cbo_t2 on cbo_t1.a=p right outer join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q >= 2) and (b > 0 or c_int >= 0) group by cbo_t3.c_int, c +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +PREHOOK: Input: default@cbo_t2 +PREHOOK: Input: default@cbo_t2@dt=2014 +PREHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0) group by c_float, cbo_t1.c_int, key having cbo_t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by b+c, a desc) cbo_t1 right outer join (select key as p, c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0) group by c_float, cbo_t2.c_int, key having cbo_t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0) cbo_t2 on cbo_t1.a=p right outer join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q >= 2) and (b > 0 or c_int >= 0) group by cbo_t3.c_int, c +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +POSTHOOK: Input: default@cbo_t2 +POSTHOOK: Input: default@cbo_t2@dt=2014 +POSTHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### +1 2 +1 12 +PREHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0) group by c_float, cbo_t1.c_int, key having cbo_t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by c+a desc) cbo_t1 full outer join (select key as p, c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0) group by c_float, cbo_t2.c_int, key having cbo_t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by p+q desc, r asc) cbo_t2 on cbo_t1.a=p full outer join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q >= 0) and (b > 0 or c_int >= 0) group by cbo_t3.c_int, c having cbo_t3.c_int > 0 and (c_int >=1 or c >= 1) and (c_int + c) >= 0 order by cbo_t3.c_int +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +PREHOOK: Input: default@cbo_t2 +PREHOOK: Input: default@cbo_t2@dt=2014 +PREHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0) group by c_float, cbo_t1.c_int, key having cbo_t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by c+a desc) cbo_t1 full outer join (select key as p, c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0) group by c_float, cbo_t2.c_int, key having cbo_t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by p+q desc, r asc) cbo_t2 on cbo_t1.a=p full outer join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q >= 0) and (b > 0 or c_int >= 0) group by cbo_t3.c_int, c having cbo_t3.c_int > 0 and (c_int >=1 or c >= 1) and (c_int + c) >= 0 order by cbo_t3.c_int +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +POSTHOOK: Input: default@cbo_t2 
+POSTHOOK: Input: default@cbo_t2@dt=2014 +POSTHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### +1 12 +1 2 +PREHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0) group by c_float, cbo_t1.c_int, key having cbo_t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0) cbo_t1 join (select key as p, c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0) group by c_float, cbo_t2.c_int, key having cbo_t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0) cbo_t2 on cbo_t1.a=p join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q >= 0) and (b > 0 or c_int >= 0) group by cbo_t3.c_int, c +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +PREHOOK: Input: default@cbo_t2 +PREHOOK: Input: default@cbo_t2@dt=2014 +PREHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0) group by c_float, cbo_t1.c_int, key having cbo_t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0) cbo_t1 join (select key as p, c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0) group by c_float, cbo_t2.c_int, key having cbo_t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0) cbo_t2 on cbo_t1.a=p join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q >= 0) and (b > 0 or c_int >= 0) group by cbo_t3.c_int, c +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +POSTHOOK: Input: default@cbo_t2 +POSTHOOK: Input: default@cbo_t2@dt=2014 +POSTHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### +1 2 +1 12 Index: ql/src/test/results/clientpositive/tez/cbo_gby_empty.q.out =================================================================== --- ql/src/test/results/clientpositive/tez/cbo_gby_empty.q.out (revision 0) +++ ql/src/test/results/clientpositive/tez/cbo_gby_empty.q.out (working copy) @@ -0,0 +1,77 @@ +PREHOOK: query: -- 21. Test groupby is empty and there is no other cols in aggr +select unionsrc.key FROM (select 'tst1' as key, count(1) as value from src) unionsrc +PREHOOK: type: QUERY +PREHOOK: Input: default@src +#### A masked pattern was here #### +POSTHOOK: query: -- 21. 
Test groupby is empty and there is no other cols in aggr +select unionsrc.key FROM (select 'tst1' as key, count(1) as value from src) unionsrc +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +#### A masked pattern was here #### +tst1 +PREHOOK: query: select unionsrc.key, unionsrc.value FROM (select 'tst1' as key, count(1) as value from src) unionsrc +PREHOOK: type: QUERY +PREHOOK: Input: default@src +#### A masked pattern was here #### +POSTHOOK: query: select unionsrc.key, unionsrc.value FROM (select 'tst1' as key, count(1) as value from src) unionsrc +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +#### A masked pattern was here #### +tst1 500 +PREHOOK: query: select unionsrc.key FROM (select 'max' as key, max(c_int) as value from cbo_t3 s1 + UNION ALL + select 'min' as key, min(c_int) as value from cbo_t3 s2 + UNION ALL + select 'avg' as key, avg(c_int) as value from cbo_t3 s3) unionsrc order by unionsrc.key +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### +POSTHOOK: query: select unionsrc.key FROM (select 'max' as key, max(c_int) as value from cbo_t3 s1 + UNION ALL + select 'min' as key, min(c_int) as value from cbo_t3 s2 + UNION ALL + select 'avg' as key, avg(c_int) as value from cbo_t3 s3) unionsrc order by unionsrc.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### +avg +max +min +PREHOOK: query: select unionsrc.key, unionsrc.value FROM (select 'max' as key, max(c_int) as value from cbo_t3 s1 + UNION ALL + select 'min' as key, min(c_int) as value from cbo_t3 s2 + UNION ALL + select 'avg' as key, avg(c_int) as value from cbo_t3 s3) unionsrc order by unionsrc.key +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### +POSTHOOK: query: select unionsrc.key, unionsrc.value FROM (select 'max' as key, max(c_int) as value from cbo_t3 s1 + UNION ALL + select 'min' as key, min(c_int) as value from cbo_t3 s2 + UNION ALL + select 'avg' as key, avg(c_int) as value from cbo_t3 s3) unionsrc order by unionsrc.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### +avg 1.5 +max 3.0 +min 1.0 +PREHOOK: query: select unionsrc.key, count(1) FROM (select 'max' as key, max(c_int) as value from cbo_t3 s1 + UNION ALL + select 'min' as key, min(c_int) as value from cbo_t3 s2 + UNION ALL + select 'avg' as key, avg(c_int) as value from cbo_t3 s3) unionsrc group by unionsrc.key order by unionsrc.key +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### +POSTHOOK: query: select unionsrc.key, count(1) FROM (select 'max' as key, max(c_int) as value from cbo_t3 s1 + UNION ALL + select 'min' as key, min(c_int) as value from cbo_t3 s2 + UNION ALL + select 'avg' as key, avg(c_int) as value from cbo_t3 s3) unionsrc group by unionsrc.key order by unionsrc.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### +avg 1 +max 1 +min 1 Index: ql/src/test/results/clientpositive/tez/cbo_join.q.out =================================================================== --- ql/src/test/results/clientpositive/tez/cbo_join.q.out (revision 0) +++ ql/src/test/results/clientpositive/tez/cbo_join.q.out (working copy) @@ -0,0 +1,15118 @@ +PREHOOK: query: -- 4. 
Test Select + Join + TS +select cbo_t1.c_int, cbo_t2.c_int from cbo_t1 join cbo_t2 on cbo_t1.key=cbo_t2.key +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +PREHOOK: Input: default@cbo_t2 +PREHOOK: Input: default@cbo_t2@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: -- 4. Test Select + Join + TS +select cbo_t1.c_int, cbo_t2.c_int from cbo_t1 join cbo_t2 on cbo_t1.key=cbo_t2.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +POSTHOOK: Input: default@cbo_t2 +POSTHOOK: Input: default@cbo_t2@dt=2014 +#### A masked pattern was here #### +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +NULL NULL +NULL NULL +NULL NULL +NULL NULL +PREHOOK: query: select cbo_t1.key from cbo_t1 join cbo_t3 +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +PREHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### +POSTHOOK: query: select cbo_t1.key from cbo_t1 join cbo_t3 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +POSTHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +PREHOOK: query: select cbo_t1.key from cbo_t1 join cbo_t3 where cbo_t1.key=cbo_t3.key and cbo_t1.key >= 1 +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +PREHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### +POSTHOOK: query: select cbo_t1.key from cbo_t1 join cbo_t3 where cbo_t1.key=cbo_t3.key and cbo_t1.key >= 1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +POSTHOOK: Input: default@cbo_t3 +#### A masked pattern 
was here #### + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +PREHOOK: query: select cbo_t1.c_int, cbo_t2.c_int from cbo_t1 left outer join cbo_t2 on cbo_t1.key=cbo_t2.key +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +PREHOOK: Input: default@cbo_t2 +PREHOOK: Input: default@cbo_t2@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select cbo_t1.c_int, cbo_t2.c_int from cbo_t1 left outer join cbo_t2 on cbo_t1.key=cbo_t2.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +POSTHOOK: Input: default@cbo_t2 +POSTHOOK: Input: default@cbo_t2@dt=2014 +#### A masked pattern was here #### +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +NULL NULL +NULL NULL +NULL NULL +NULL NULL +PREHOOK: query: select cbo_t1.c_int, cbo_t2.c_int from cbo_t1 right outer join cbo_t2 on cbo_t1.key=cbo_t2.key +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +PREHOOK: Input: default@cbo_t2 +PREHOOK: Input: default@cbo_t2@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select cbo_t1.c_int, cbo_t2.c_int from cbo_t1 right outer join cbo_t2 on cbo_t1.key=cbo_t2.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +POSTHOOK: Input: default@cbo_t2 +POSTHOOK: Input: default@cbo_t2@dt=2014 +#### A masked pattern was here #### +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +NULL 2 +NULL 2 +NULL 2 +NULL 2 +NULL 2 +NULL NULL +NULL NULL +NULL NULL +NULL NULL +PREHOOK: query: select cbo_t1.c_int, cbo_t2.c_int from cbo_t1 full outer join cbo_t2 on cbo_t1.key=cbo_t2.key +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +PREHOOK: Input: default@cbo_t2 +PREHOOK: Input: default@cbo_t2@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select cbo_t1.c_int, cbo_t2.c_int from cbo_t1 full outer join cbo_t2 on cbo_t1.key=cbo_t2.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +POSTHOOK: Input: default@cbo_t2 +POSTHOOK: Input: default@cbo_t2@dt=2014 +#### A masked pattern was here #### +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 
1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +NULL 2 +NULL 2 +NULL 2 +NULL 2 +NULL 2 +NULL NULL +NULL NULL +NULL NULL +NULL NULL +PREHOOK: query: select b, cbo_t1.c, cbo_t2.p, q, cbo_t3.c_int from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1) cbo_t1 join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2) cbo_t2 on cbo_t1.a=p join cbo_t3 on cbo_t1.a=key +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +PREHOOK: Input: default@cbo_t2 +PREHOOK: Input: default@cbo_t2@dt=2014 +PREHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### +POSTHOOK: query: select b, cbo_t1.c, cbo_t2.p, q, cbo_t3.c_int from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1) cbo_t1 join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2) cbo_t2 on cbo_t1.a=p join cbo_t3 on cbo_t1.a=key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +POSTHOOK: Input: default@cbo_t2 +POSTHOOK: Input: default@cbo_t2@dt=2014 +POSTHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 
1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 
1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +NULL NULL null NULL NULL +NULL NULL null NULL NULL +NULL NULL null NULL NULL +NULL NULL null NULL NULL +NULL NULL null NULL NULL +NULL NULL null NULL NULL +NULL NULL null NULL NULL +NULL NULL null NULL NULL +PREHOOK: query: select key, cbo_t1.c_int, cbo_t2.p, q from cbo_t1 join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2) cbo_t2 on cbo_t1.key=p join (select key as a, c_int as b, cbo_t3.c_float as c from cbo_t3)cbo_t3 on cbo_t1.key=a +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +PREHOOK: Input: default@cbo_t2 +PREHOOK: Input: default@cbo_t2@dt=2014 +PREHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### +POSTHOOK: query: select key, cbo_t1.c_int, cbo_t2.p, q from cbo_t1 join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2) cbo_t2 on cbo_t1.key=p join (select key as a, c_int as b, cbo_t3.c_float as c from cbo_t3)cbo_t3 on cbo_t1.key=a +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +POSTHOOK: Input: default@cbo_t2 +POSTHOOK: Input: default@cbo_t2@dt=2014 +POSTHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 
1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +null NULL null NULL +null NULL null NULL +null NULL null NULL +null NULL null NULL +null NULL null NULL +null 
NULL null NULL +null NULL null NULL +null NULL null NULL +PREHOOK: query: select a, cbo_t1.b, key, cbo_t2.c_int, cbo_t3.p from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1) cbo_t1 join cbo_t2 on cbo_t1.a=key join (select key as p, c_int as q, cbo_t3.c_float as r from cbo_t3)cbo_t3 on cbo_t1.a=cbo_t3.p +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +PREHOOK: Input: default@cbo_t2 +PREHOOK: Input: default@cbo_t2@dt=2014 +PREHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### +POSTHOOK: query: select a, cbo_t1.b, key, cbo_t2.c_int, cbo_t3.p from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1) cbo_t1 join cbo_t2 on cbo_t1.a=key join (select key as p, c_int as q, cbo_t3.c_float as r from cbo_t3)cbo_t3 on cbo_t1.a=cbo_t3.p +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +POSTHOOK: Input: default@cbo_t2 +POSTHOOK: Input: default@cbo_t2@dt=2014 +POSTHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### + 1 1 1 1 1 + 1 1 1 1 1 + 1 1 1 1 1 + 1 1 1 1 1 + 1 1 1 1 1 + 1 1 1 1 1 + 1 1 1 1 1 + 1 1 1 1 1 + 1 1 1 1 1 + 1 1 1 1 1 + 1 1 1 1 1 + 1 1 1 1 1 + 1 1 1 1 1 + 1 1 1 1 1 + 1 1 1 1 1 + 1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 
1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +null NULL null NULL null +null NULL null NULL null +null NULL null NULL null +null NULL null NULL null +null NULL null NULL null +null NULL null NULL null +null NULL null NULL null +null NULL null NULL null +PREHOOK: query: 
select b, cbo_t1.c, cbo_t2.c_int, cbo_t3.c_int from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1) cbo_t1 join cbo_t2 on cbo_t1.a=cbo_t2.key join cbo_t3 on cbo_t1.a=cbo_t3.key +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +PREHOOK: Input: default@cbo_t2 +PREHOOK: Input: default@cbo_t2@dt=2014 +PREHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### +POSTHOOK: query: select b, cbo_t1.c, cbo_t2.c_int, cbo_t3.c_int from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1) cbo_t1 join cbo_t2 on cbo_t1.a=cbo_t2.key join cbo_t3 on cbo_t1.a=cbo_t3.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +POSTHOOK: Input: default@cbo_t2 +POSTHOOK: Input: default@cbo_t2@dt=2014 +POSTHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 
1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +NULL NULL NULL NULL +NULL NULL NULL NULL +NULL NULL NULL NULL +NULL NULL NULL NULL +NULL NULL NULL NULL +NULL NULL NULL NULL +NULL NULL NULL NULL +NULL NULL NULL NULL +PREHOOK: query: select cbo_t3.c_int, b, cbo_t2.c_int, cbo_t1.c from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1) cbo_t1 join cbo_t2 on cbo_t1.a=cbo_t2.key join cbo_t3 on cbo_t1.a=cbo_t3.key +PREHOOK: type: QUERY +PREHOOK: Input: 
default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +PREHOOK: Input: default@cbo_t2 +PREHOOK: Input: default@cbo_t2@dt=2014 +PREHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### +POSTHOOK: query: select cbo_t3.c_int, b, cbo_t2.c_int, cbo_t1.c from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1) cbo_t1 join cbo_t2 on cbo_t1.a=cbo_t2.key join cbo_t3 on cbo_t1.a=cbo_t3.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +POSTHOOK: Input: default@cbo_t2 +POSTHOOK: Input: default@cbo_t2@dt=2014 +POSTHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 
1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +NULL NULL NULL NULL +NULL NULL NULL NULL +NULL NULL NULL NULL +NULL NULL NULL NULL +NULL NULL NULL NULL +NULL NULL NULL NULL +NULL NULL NULL NULL +NULL NULL NULL NULL +PREHOOK: query: select b, cbo_t1.c, cbo_t2.p, q, cbo_t3.c_int from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1) cbo_t1 left outer join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2) cbo_t2 on cbo_t1.a=p join cbo_t3 on cbo_t1.a=key +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +PREHOOK: Input: default@cbo_t2 +PREHOOK: Input: default@cbo_t2@dt=2014 +PREHOOK: Input: default@cbo_t3 +#### A masked 
pattern was here #### +POSTHOOK: query: select b, cbo_t1.c, cbo_t2.p, q, cbo_t3.c_int from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1) cbo_t1 left outer join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2) cbo_t2 on cbo_t1.a=p join cbo_t3 on cbo_t1.a=key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +POSTHOOK: Input: default@cbo_t2 +POSTHOOK: Input: default@cbo_t2@dt=2014 +POSTHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 
+1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 
1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +NULL NULL null NULL NULL +NULL NULL null NULL NULL +NULL NULL null NULL NULL +NULL NULL null NULL NULL +NULL NULL null NULL NULL +NULL NULL null NULL NULL +NULL NULL null NULL NULL +NULL NULL null NULL NULL +PREHOOK: query: select key, cbo_t1.c_int, cbo_t2.p, q from cbo_t1 join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2) cbo_t2 on cbo_t1.key=p left outer join (select key as a, c_int as b, cbo_t3.c_float as c from cbo_t3)cbo_t3 on cbo_t1.key=a +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +PREHOOK: Input: default@cbo_t2 +PREHOOK: Input: default@cbo_t2@dt=2014 +PREHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### +POSTHOOK: query: select key, cbo_t1.c_int, cbo_t2.p, q from cbo_t1 join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2) cbo_t2 on cbo_t1.key=p left outer join (select key as a, c_int as b, cbo_t3.c_float as c from cbo_t3)cbo_t3 on cbo_t1.key=a +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +POSTHOOK: Input: default@cbo_t2 +POSTHOOK: Input: default@cbo_t2@dt=2014 +POSTHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 
+null	NULL	null	NULL
+null	NULL	null	NULL
+null	NULL	null	NULL
+null	NULL	null	NULL
+null	NULL	null	NULL
+null	NULL	null	NULL
+null	NULL	null	NULL
+null	NULL	null	NULL
+PREHOOK: query: select b, cbo_t1.c, cbo_t2.p, q, cbo_t3.c_int from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1) cbo_t1 right outer join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2) cbo_t2 on cbo_t1.a=p join cbo_t3 on cbo_t1.a=key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+PREHOOK: Input: default@cbo_t2
+PREHOOK: Input: default@cbo_t2@dt=2014
+PREHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+POSTHOOK: query: select b, cbo_t1.c, cbo_t2.p, q, cbo_t3.c_int from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1) cbo_t1 right outer join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2) cbo_t2 on cbo_t1.a=p join cbo_t3 on cbo_t1.a=key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+POSTHOOK: Input: default@cbo_t2
+POSTHOOK: Input: default@cbo_t2@dt=2014
+POSTHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
[... expected-output rows elided: a long run of "1	1.0	1	1	1" rows ...]
+NULL	NULL	null	NULL	NULL
+NULL	NULL	null	NULL	NULL
+NULL	NULL	null	NULL	NULL
+NULL	NULL	null	NULL	NULL
+NULL	NULL	null	NULL	NULL
+NULL	NULL	null	NULL	NULL
+NULL	NULL	null	NULL	NULL
+NULL	NULL	null	NULL	NULL
+PREHOOK: query: select key, cbo_t1.c_int, cbo_t2.p, q from cbo_t1 join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2) cbo_t2 on cbo_t1.key=p right outer join (select key as a, c_int as b, cbo_t3.c_float as c from cbo_t3)cbo_t3 on cbo_t1.key=a
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+PREHOOK: Input: default@cbo_t2
+PREHOOK: Input: default@cbo_t2@dt=2014
+PREHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+POSTHOOK: query: select key, cbo_t1.c_int, cbo_t2.p, q from cbo_t1 join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2) cbo_t2 on cbo_t1.key=p right outer join (select key as a, c_int as b, cbo_t3.c_float as c from cbo_t3)cbo_t3 on cbo_t1.key=a
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+POSTHOOK: Input: default@cbo_t2
+POSTHOOK: Input: default@cbo_t2@dt=2014
+POSTHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
[... expected-output rows elided: 16 rows of " 1	1	1	1" followed by a long run of "1	1	1	1" rows ...]
+NULL	NULL	NULL	NULL
+NULL	NULL	NULL	NULL
+NULL	NULL	NULL	NULL
+NULL	NULL	NULL	NULL
+NULL	NULL	NULL	NULL
+NULL	NULL	NULL	NULL
+null	NULL	null	NULL
+null	NULL	null	NULL
+null	NULL	null	NULL
+null	NULL	null	NULL
+null	NULL	null	NULL
+null	NULL	null	NULL
+null	NULL	null	NULL
+null	NULL	null	NULL
+PREHOOK: query: select b, cbo_t1.c, cbo_t2.p, q, cbo_t3.c_int from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1) cbo_t1 full outer join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2) cbo_t2 on cbo_t1.a=p join cbo_t3 on cbo_t1.a=key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+PREHOOK: Input: default@cbo_t2
+PREHOOK: Input: default@cbo_t2@dt=2014
+PREHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+POSTHOOK: query: select b, cbo_t1.c, cbo_t2.p, q, cbo_t3.c_int from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1) cbo_t1 full outer join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2) cbo_t2 on cbo_t1.a=p join cbo_t3 on cbo_t1.a=key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+POSTHOOK: Input: default@cbo_t2
+POSTHOOK: Input: default@cbo_t2@dt=2014
+POSTHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
[... expected-output rows elided: a long run of "1	1.0	1	1	1" rows ...]
+NULL	NULL	null	NULL	NULL
+NULL	NULL	null	NULL	NULL
+NULL	NULL	null	NULL	NULL
+NULL	NULL	null	NULL	NULL
+NULL	NULL	null	NULL	NULL
+NULL	NULL	null	NULL	NULL
+NULL	NULL	null	NULL	NULL
+NULL	NULL	null	NULL	NULL
+PREHOOK: query: select key, cbo_t1.c_int, cbo_t2.p, q from cbo_t1 join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2) cbo_t2 on cbo_t1.key=p full outer join (select key as a, c_int as b, cbo_t3.c_float as c from cbo_t3)cbo_t3 on cbo_t1.key=a
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+PREHOOK: Input: default@cbo_t2
+PREHOOK: Input: default@cbo_t2@dt=2014
+PREHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+POSTHOOK: query: select key, cbo_t1.c_int, cbo_t2.p, q from cbo_t1 join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2) cbo_t2 on cbo_t1.key=p full outer join (select key as a, c_int as b, cbo_t3.c_float as c from cbo_t3)cbo_t3 on cbo_t1.key=a
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+POSTHOOK: Input: default@cbo_t2
+POSTHOOK: Input: default@cbo_t2@dt=2014
+POSTHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
[... expected-output rows elided: 16 rows of " 1	1	1	1" followed by a long run of "1	1	1	1" rows ...]
+NULL	NULL	NULL	NULL
+NULL	NULL	NULL	NULL
+NULL	NULL	NULL	NULL
+NULL	NULL	NULL	NULL
+NULL	NULL	NULL	NULL
+NULL	NULL	NULL	NULL
+null	NULL	null	NULL
+null	NULL	null	NULL
+null	NULL	null	NULL
+null	NULL	null	NULL
+null	NULL	null	NULL
+null	NULL	null	NULL
+null	NULL	null	NULL
+null	NULL	null	NULL
+PREHOOK: query: -- 5. Test Select + Join + FIL + TS
+select cbo_t1.c_int, cbo_t2.c_int from cbo_t1 join cbo_t2 on cbo_t1.key=cbo_t2.key where (cbo_t1.c_int + cbo_t2.c_int == 2) and (cbo_t1.c_int > 0 or cbo_t2.c_float >= 0)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+PREHOOK: Input: default@cbo_t2
+PREHOOK: Input: default@cbo_t2@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: -- 5. Test Select + Join + FIL + TS
+select cbo_t1.c_int, cbo_t2.c_int from cbo_t1 join cbo_t2 on cbo_t1.key=cbo_t2.key where (cbo_t1.c_int + cbo_t2.c_int == 2) and (cbo_t1.c_int > 0 or cbo_t2.c_float >= 0)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+POSTHOOK: Input: default@cbo_t2
+POSTHOOK: Input: default@cbo_t2@dt=2014
+#### A masked pattern was here ####
[... expected-output rows elided: a long run of "1	1" rows ...]
+PREHOOK: query: select cbo_t1.c_int, cbo_t2.c_int from cbo_t1 left outer join cbo_t2 on cbo_t1.key=cbo_t2.key where (cbo_t1.c_int + cbo_t2.c_int == 2) and (cbo_t1.c_int > 0 or cbo_t2.c_float >= 0)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+PREHOOK: Input: default@cbo_t2
+PREHOOK: Input: default@cbo_t2@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: select cbo_t1.c_int, cbo_t2.c_int from cbo_t1 left outer join cbo_t2 on cbo_t1.key=cbo_t2.key where (cbo_t1.c_int + cbo_t2.c_int == 2) and (cbo_t1.c_int > 0 or cbo_t2.c_float >= 0)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+POSTHOOK: Input: default@cbo_t2
+POSTHOOK: Input: default@cbo_t2@dt=2014
+#### A masked pattern was here ####
[... expected-output rows elided: a long run of "1	1" rows ...]
+PREHOOK: query: select cbo_t1.c_int, cbo_t2.c_int from cbo_t1 right outer join cbo_t2 on cbo_t1.key=cbo_t2.key where (cbo_t1.c_int + cbo_t2.c_int == 2) and (cbo_t1.c_int > 0 or cbo_t2.c_float >= 0)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+PREHOOK: Input: default@cbo_t2
+PREHOOK: Input: default@cbo_t2@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: select cbo_t1.c_int, cbo_t2.c_int from cbo_t1 right outer join cbo_t2 on cbo_t1.key=cbo_t2.key where (cbo_t1.c_int + cbo_t2.c_int == 2) and (cbo_t1.c_int > 0 or cbo_t2.c_float >= 0)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+POSTHOOK: Input: default@cbo_t2
+POSTHOOK: Input: default@cbo_t2@dt=2014
+#### A masked pattern was here ####
[... expected-output rows elided: a long run of "1	1" rows ...]
+PREHOOK: query: select cbo_t1.c_int, cbo_t2.c_int from cbo_t1 full outer join cbo_t2 on cbo_t1.key=cbo_t2.key where (cbo_t1.c_int + cbo_t2.c_int == 2) and (cbo_t1.c_int > 0 or cbo_t2.c_float >= 0)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+PREHOOK: Input: default@cbo_t2
+PREHOOK: Input: default@cbo_t2@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: select cbo_t1.c_int, cbo_t2.c_int from cbo_t1 full outer join cbo_t2 on cbo_t1.key=cbo_t2.key where (cbo_t1.c_int + cbo_t2.c_int == 2) and (cbo_t1.c_int > 0 or cbo_t2.c_float >= 0)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+POSTHOOK: Input: default@cbo_t2
+POSTHOOK: Input: default@cbo_t2@dt=2014
+#### A masked pattern was here ####
[... expected-output rows elided: a long run of "1	1" rows ...]
+PREHOOK: query: select b, cbo_t1.c, cbo_t2.p, q, cbo_t3.c_int from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1 where (cbo_t1.c_int + 1 == 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)) cbo_t1 join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2 where (cbo_t2.c_int + 1 == 2) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)) cbo_t2 on cbo_t1.a=p join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q == 2) and (b > 0 or cbo_t2.q >= 0)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+PREHOOK: Input: default@cbo_t2
+PREHOOK: Input: default@cbo_t2@dt=2014
+PREHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+POSTHOOK: query: select b, cbo_t1.c, cbo_t2.p, q, cbo_t3.c_int from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1 where (cbo_t1.c_int + 1 == 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)) cbo_t1 join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2 where (cbo_t2.c_int + 1 == 2) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)) cbo_t2 on cbo_t1.a=p join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q == 2) and (b > 0 or cbo_t2.q >= 0)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+POSTHOOK: Input: default@cbo_t2
+POSTHOOK: Input: default@cbo_t2@dt=2014
+POSTHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
[... expected-output rows elided: a long run of "1	1.0	1	1	1" rows ...]
+PREHOOK: query: select q, b, cbo_t2.p, cbo_t1.c, cbo_t3.c_int from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1 where (cbo_t1.c_int + 1 == 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)) cbo_t1 left outer join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2 where (cbo_t2.c_int + 1 == 2) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)) cbo_t2 on cbo_t1.a=p join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q == 2) and (b > 0 or c_int >= 0)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+PREHOOK: Input: default@cbo_t2
+PREHOOK: Input: default@cbo_t2@dt=2014
+PREHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+POSTHOOK: query: select q, b, cbo_t2.p, cbo_t1.c, cbo_t3.c_int from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1 where (cbo_t1.c_int + 1 == 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)) cbo_t1 left outer join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2 where (cbo_t2.c_int + 1 == 2) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)) cbo_t2 on cbo_t1.a=p join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q == 2) and (b > 0 or c_int >= 0)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+POSTHOOK: Input: default@cbo_t2
+POSTHOOK: Input: default@cbo_t2@dt=2014
+POSTHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
[... expected-output rows elided: a long run of "1	1	1	1.0	1" rows ...]
+PREHOOK: query: select q, b, cbo_t2.p, cbo_t1.c, cbo_t3.c_int from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1 where (cbo_t1.c_int + 1 == 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)) cbo_t1 right outer join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2 where (cbo_t2.c_int + 1 == 2) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)) cbo_t2 on cbo_t1.a=p join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q == 2) and (b > 0 or c_int >= 0)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+PREHOOK: Input: default@cbo_t2
+PREHOOK: Input: default@cbo_t2@dt=2014
+PREHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+POSTHOOK: query: select q, b, cbo_t2.p, cbo_t1.c, cbo_t3.c_int from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1 where (cbo_t1.c_int + 1 == 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)) cbo_t1 right outer join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2 where (cbo_t2.c_int + 1 == 2) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)) cbo_t2 on cbo_t1.a=p join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q == 2) and (b > 0 or c_int >= 0)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+POSTHOOK: Input: default@cbo_t2
+POSTHOOK: Input: default@cbo_t2@dt=2014
+POSTHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
[... expected-output rows elided: a long run of "1	1	1	1.0	1" rows ...]
+PREHOOK: query: select q, b, cbo_t2.p, cbo_t1.c, cbo_t3.c_int from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1 where (cbo_t1.c_int + 1 == 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)) cbo_t1 full outer join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2 where (cbo_t2.c_int + 1 == 2) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)) cbo_t2 on cbo_t1.a=p join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q == 2) and (b > 0 or c_int >= 0)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+PREHOOK: Input: default@cbo_t2
+PREHOOK: Input: default@cbo_t2@dt=2014
+PREHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+POSTHOOK: query: select q, b, cbo_t2.p, cbo_t1.c, cbo_t3.c_int from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1 where (cbo_t1.c_int + 1 == 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)) cbo_t1 full outer join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2 where (cbo_t2.c_int + 1 == 2) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)) cbo_t2 on cbo_t1.a=p join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q == 2) and (b > 0 or c_int >= 0)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+POSTHOOK: Input: default@cbo_t2
+POSTHOOK: Input: default@cbo_t2@dt=2014
+POSTHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+1 1 1 1.0 1
[... the row "1 1 1 1.0 1" repeats for the remainder of this result block ...]
+PREHOOK: query: select * from (select q, b, cbo_t2.p, cbo_t1.c, cbo_t3.c_int from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1 where (cbo_t1.c_int + 1 == 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)) cbo_t1 full outer join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2 where (cbo_t2.c_int + 1 == 2) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)) cbo_t2 on cbo_t1.a=p join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+PREHOOK: Input: default@cbo_t2
+PREHOOK: Input: default@cbo_t2@dt=2014
+PREHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+POSTHOOK: query: select * from (select q, b, cbo_t2.p, cbo_t1.c, cbo_t3.c_int from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1 where (cbo_t1.c_int + 1 == 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)) cbo_t1 full outer join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2 where (cbo_t2.c_int + 1 == 2) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)) cbo_t2 on cbo_t1.a=p join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+POSTHOOK: Input: default@cbo_t2
+POSTHOOK: Input: default@cbo_t2@dt=2014
+POSTHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+1 1 1 1.0 1
[... the row "1 1 1 1.0 1" repeats for the remainder of this result block ...]
+PREHOOK: query: select * from (select q, b, cbo_t2.p, cbo_t1.c, cbo_t3.c_int from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1 where (cbo_t1.c_int + 1 == 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)) cbo_t1 left outer join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2 where (cbo_t2.c_int + 1 == 2) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)) cbo_t2 on cbo_t1.a=p left outer join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+PREHOOK: Input: default@cbo_t2
+PREHOOK: Input: default@cbo_t2@dt=2014
+PREHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+POSTHOOK: query: select * from (select q, b, cbo_t2.p, cbo_t1.c, cbo_t3.c_int from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1 where (cbo_t1.c_int + 1 == 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)) cbo_t1 left outer join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2 where (cbo_t2.c_int + 1 == 2) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)) cbo_t2 on cbo_t1.a=p left outer join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+POSTHOOK: Input: default@cbo_t2
+POSTHOOK: Input: default@cbo_t2@dt=2014
+POSTHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+1 1 1 1.0 1
[... the row "1 1 1 1.0 1" repeats for the remainder of this result block ...]
+PREHOOK: query: select * from (select q, b, cbo_t2.p, cbo_t1.c, cbo_t3.c_int from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1 where (cbo_t1.c_int + 1 == 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)) cbo_t1 left outer join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2 where (cbo_t2.c_int + 1 == 2) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)) cbo_t2 on cbo_t1.a=p right outer join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+PREHOOK: Input: default@cbo_t2
+PREHOOK: Input: default@cbo_t2@dt=2014
+PREHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+POSTHOOK: query: select * from (select q, b, cbo_t2.p, cbo_t1.c, cbo_t3.c_int from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1 where (cbo_t1.c_int + 1 == 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)) cbo_t1 left outer join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2 where (cbo_t2.c_int + 1 == 2) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)) cbo_t2 on cbo_t1.a=p right outer join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+POSTHOOK: Input: default@cbo_t2
+POSTHOOK: Input: default@cbo_t2@dt=2014
+POSTHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+1 1 1 1.0 1
[... the row "1 1 1 1.0 1" repeats for the remainder of this result block ...]
+PREHOOK: query: select * from (select q, b, cbo_t2.p, cbo_t1.c, cbo_t3.c_int from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1 where (cbo_t1.c_int + 1 == 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)) cbo_t1 left outer join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2 where (cbo_t2.c_int + 1 == 2) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)) cbo_t2 on cbo_t1.a=p full outer join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+PREHOOK: Input: default@cbo_t2
+PREHOOK: Input: default@cbo_t2@dt=2014
+PREHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+POSTHOOK: query: select * from (select q, b, cbo_t2.p, cbo_t1.c, cbo_t3.c_int from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1 where (cbo_t1.c_int + 1 == 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)) cbo_t1 left outer join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2 where (cbo_t2.c_int + 1 == 2) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)) cbo_t2 on cbo_t1.a=p full outer join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+POSTHOOK: Input: default@cbo_t2
+POSTHOOK: Input: default@cbo_t2@dt=2014
+POSTHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+1 1 1 1.0 1
[... the row "1 1 1 1.0 1" repeats for the remainder of this result block ...]
+PREHOOK: query: select * from (select q, b, cbo_t2.p, cbo_t1.c, cbo_t3.c_int from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1 where (cbo_t1.c_int + 1 == 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)) cbo_t1 right outer join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2 where (cbo_t2.c_int + 1 == 2) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)) cbo_t2 on cbo_t1.a=p right outer join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+PREHOOK: Input: default@cbo_t2
+PREHOOK: Input: default@cbo_t2@dt=2014
+PREHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+POSTHOOK: query: select * from (select q, b, cbo_t2.p, cbo_t1.c, cbo_t3.c_int from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1 where (cbo_t1.c_int + 1 == 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)) cbo_t1 right outer join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2 where (cbo_t2.c_int + 1 == 2) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)) cbo_t2 on cbo_t1.a=p right outer join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+POSTHOOK: Input: default@cbo_t2
+POSTHOOK: Input: default@cbo_t2@dt=2014
+POSTHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+1 1 1 1.0 1
[... the row "1 1 1 1.0 1" repeats for the remainder of this result block ...]
+PREHOOK: query: select * from (select q, b, cbo_t2.p, cbo_t1.c, cbo_t3.c_int from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1 where (cbo_t1.c_int + 1 == 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)) cbo_t1 right outer join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2 where (cbo_t2.c_int + 1 == 2) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)) cbo_t2 on cbo_t1.a=p left outer join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+PREHOOK: Input: default@cbo_t2
+PREHOOK: Input: default@cbo_t2@dt=2014
+PREHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+POSTHOOK: query: select * from (select q, b, cbo_t2.p, cbo_t1.c, cbo_t3.c_int from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1 where (cbo_t1.c_int + 1 == 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)) cbo_t1 right outer join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2 where (cbo_t2.c_int + 1 == 2) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)) cbo_t2 on cbo_t1.a=p left outer join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+POSTHOOK: Input: default@cbo_t2
+POSTHOOK: Input: default@cbo_t2@dt=2014
+POSTHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+1 1 1 1.0 1
[... the row "1 1 1 1.0 1" repeats for the remainder of this result block ...]
+PREHOOK: query: select * from (select q, b, cbo_t2.p, cbo_t1.c, cbo_t3.c_int from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1 where (cbo_t1.c_int + 1 == 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)) cbo_t1 right outer join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2 where (cbo_t2.c_int + 1 == 2) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)) cbo_t2 on cbo_t1.a=p full outer join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+PREHOOK: Input: default@cbo_t2
+PREHOOK: Input: default@cbo_t2@dt=2014
+PREHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+POSTHOOK: query: select * from (select q, b, cbo_t2.p, cbo_t1.c, cbo_t3.c_int from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1 where (cbo_t1.c_int + 1 == 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)) cbo_t1 right outer join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2 where (cbo_t2.c_int + 1 == 2) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)) cbo_t2 on cbo_t1.a=p full outer join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+POSTHOOK: Input: default@cbo_t2
+POSTHOOK: Input: default@cbo_t2@dt=2014
+POSTHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+1 1 1 1.0 1
[... the row "1 1 1 1.0 1" repeats for the remainder of this result block ...]
+1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 
1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +PREHOOK: query: select * from (select q, b, cbo_t2.p, cbo_t1.c, cbo_t3.c_int from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1 where (cbo_t1.c_int + 1 == 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)) cbo_t1 full outer join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2 where (cbo_t2.c_int + 1 == 2) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)) cbo_t2 on cbo_t1.a=p full outer join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0) +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +PREHOOK: Input: default@cbo_t2 +PREHOOK: Input: default@cbo_t2@dt=2014 +PREHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select q, b, cbo_t2.p, cbo_t1.c, cbo_t3.c_int from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1 where (cbo_t1.c_int + 1 == 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)) cbo_t1 full outer join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2 where (cbo_t2.c_int + 1 == 2) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)) cbo_t2 on cbo_t1.a=p full outer join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +POSTHOOK: Input: default@cbo_t2 +POSTHOOK: Input: default@cbo_t2@dt=2014 +POSTHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 
1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 
1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +PREHOOK: query: select * from (select q, b, cbo_t2.p, cbo_t1.c, cbo_t3.c_int from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1 where (cbo_t1.c_int + 1 == 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)) cbo_t1 full outer join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2 where (cbo_t2.c_int + 1 == 2) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)) cbo_t2 on cbo_t1.a=p left outer join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0) +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +PREHOOK: Input: default@cbo_t2 +PREHOOK: Input: default@cbo_t2@dt=2014 +PREHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select q, b, cbo_t2.p, cbo_t1.c, cbo_t3.c_int from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1 where (cbo_t1.c_int + 1 == 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)) cbo_t1 full outer join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2 where (cbo_t2.c_int + 1 == 2) and 
(cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)) cbo_t2 on cbo_t1.a=p left outer join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +POSTHOOK: Input: default@cbo_t2 +POSTHOOK: Input: default@cbo_t2@dt=2014 +POSTHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 
1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 
1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +PREHOOK: query: select * from (select q, b, cbo_t2.p, cbo_t1.c, cbo_t3.c_int from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1 where (cbo_t1.c_int + 1 == 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)) cbo_t1 full outer join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2 where (cbo_t2.c_int + 1 == 2) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)) cbo_t2 on cbo_t1.a=p right outer join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0) +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +PREHOOK: Input: default@cbo_t2 +PREHOOK: Input: default@cbo_t2@dt=2014 +PREHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select q, b, cbo_t2.p, cbo_t1.c, cbo_t3.c_int from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1 where (cbo_t1.c_int + 1 == 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)) cbo_t1 full outer join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2 where (cbo_t2.c_int + 1 == 2) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)) cbo_t2 on cbo_t1.a=p right outer join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +POSTHOOK: Input: default@cbo_t2 +POSTHOOK: Input: default@cbo_t2@dt=2014 +POSTHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 
1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 
1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 Index: ql/src/test/results/clientpositive/tez/cbo_limit.q.out =================================================================== --- ql/src/test/results/clientpositive/tez/cbo_limit.q.out (revision 0) +++ ql/src/test/results/clientpositive/tez/cbo_limit.q.out (working copy) @@ -0,0 +1,101 @@ +PREHOOK: query: -- 7. Test Select + TS + Join + Fil + GB + GB Having + Limit +select * from cbo_t1 group by c_int limit 1 +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: -- 7. 
Test Select + TS + Join + Fil + GB + GB Having + Limit +select * from cbo_t1 group by c_int limit 1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +NULL +PREHOOK: query: select key, (c_int+1)+2 as x, sum(c_int) from cbo_t1 group by c_float, cbo_t1.c_int, key order by x limit 1 +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select key, (c_int+1)+2 as x, sum(c_int) from cbo_t1 group by c_float, cbo_t1.c_int, key order by x limit 1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +null NULL NULL +PREHOOK: query: select * from (select key, (c_int+c_float+1+2) as x, sum(c_int) as y from cbo_t1 group by c_float, cbo_t1.c_int, key) R group by y, x order by x,y limit 1 +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select key, (c_int+c_float+1+2) as x, sum(c_int) as y from cbo_t1 group by c_float, cbo_t1.c_int, key) R group by y, x order by x,y limit 1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +NULL NULL +PREHOOK: query: select key from(select key from (select key from cbo_t1 limit 5)cbo_t2 limit 5)cbo_t3 limit 5 +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select key from(select key from (select key from cbo_t1 limit 5)cbo_t2 limit 5)cbo_t3 limit 5 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +1 +1 +1 +1 +1 +PREHOOK: query: select key, c_int from(select key, c_int from (select key, c_int from cbo_t1 order by c_int limit 5)cbo_t1 order by c_int limit 5)cbo_t2 order by c_int limit 5 +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select key, c_int from(select key, c_int from (select key, c_int from cbo_t1 order by c_int limit 5)cbo_t1 order by c_int limit 5)cbo_t2 order by c_int limit 5 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +null NULL +null NULL +1 1 +1 1 +1 1 +PREHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0) group by c_float, cbo_t1.c_int, key order by a limit 5) cbo_t1 join (select key as p, c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0) group by c_float, cbo_t2.c_int, key order by q/10 desc, r asc limit 5) cbo_t2 on cbo_t1.a=p join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q >= 0) and (b > 0 or c_int >= 0) group by cbo_t3.c_int, c order by cbo_t3.c_int+c desc, c limit 5 +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +PREHOOK: Input: default@cbo_t2 +PREHOOK: Input: default@cbo_t2@dt=2014 +PREHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 or 
cbo_t1.c_float >= 0) group by c_float, cbo_t1.c_int, key order by a limit 5) cbo_t1 join (select key as p, c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0) group by c_float, cbo_t2.c_int, key order by q/10 desc, r asc limit 5) cbo_t2 on cbo_t1.a=p join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q >= 0) and (b > 0 or c_int >= 0) group by cbo_t3.c_int, c order by cbo_t3.c_int+c desc, c limit 5 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +POSTHOOK: Input: default@cbo_t2 +POSTHOOK: Input: default@cbo_t2@dt=2014 +POSTHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### +1 12 +1 2 +PREHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0) group by c_float, cbo_t1.c_int, key having cbo_t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by b % c asc, b desc limit 5) cbo_t1 left outer join (select key as p, c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0) group by c_float, cbo_t2.c_int, key having cbo_t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 limit 5) cbo_t2 on cbo_t1.a=p left outer join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q >= 0) and (b > 0 or c_int >= 0) group by cbo_t3.c_int, c having cbo_t3.c_int > 0 and (c_int >=1 or c >= 1) and (c_int + c) >= 0 order by cbo_t3.c_int % c asc, cbo_t3.c_int desc limit 5 +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +PREHOOK: Input: default@cbo_t2 +PREHOOK: Input: default@cbo_t2@dt=2014 +PREHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0) group by c_float, cbo_t1.c_int, key having cbo_t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by b % c asc, b desc limit 5) cbo_t1 left outer join (select key as p, c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0) group by c_float, cbo_t2.c_int, key having cbo_t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 limit 5) cbo_t2 on cbo_t1.a=p left outer join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q >= 0) and (b > 0 or c_int >= 0) group by cbo_t3.c_int, c having cbo_t3.c_int > 0 and (c_int >=1 or c >= 1) and (c_int + c) >= 0 order by cbo_t3.c_int % c asc, cbo_t3.c_int desc limit 5 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +POSTHOOK: Input: default@cbo_t2 +POSTHOOK: Input: default@cbo_t2@dt=2014 +POSTHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### +1 12 +1 2 Index: ql/src/test/results/clientpositive/tez/cbo_semijoin.q.out =================================================================== --- ql/src/test/results/clientpositive/tez/cbo_semijoin.q.out (revision 0) +++ ql/src/test/results/clientpositive/tez/cbo_semijoin.q.out (working copy) @@ -0,0 +1,442 @@ +PREHOOK: query: -- 12. 
SemiJoin +select cbo_t1.c_int from cbo_t1 left semi join cbo_t2 on cbo_t1.key=cbo_t2.key +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +PREHOOK: Input: default@cbo_t2 +PREHOOK: Input: default@cbo_t2@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: -- 12. SemiJoin +select cbo_t1.c_int from cbo_t1 left semi join cbo_t2 on cbo_t1.key=cbo_t2.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +POSTHOOK: Input: default@cbo_t2 +POSTHOOK: Input: default@cbo_t2@dt=2014 +#### A masked pattern was here #### +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +NULL +NULL +PREHOOK: query: select cbo_t1.c_int from cbo_t1 left semi join cbo_t2 on cbo_t1.key=cbo_t2.key where (cbo_t1.c_int + 1 == 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0) +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +PREHOOK: Input: default@cbo_t2 +PREHOOK: Input: default@cbo_t2@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select cbo_t1.c_int from cbo_t1 left semi join cbo_t2 on cbo_t1.key=cbo_t2.key where (cbo_t1.c_int + 1 == 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +POSTHOOK: Input: default@cbo_t2 +POSTHOOK: Input: default@cbo_t2@dt=2014 +#### A masked pattern was here #### +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +PREHOOK: query: select * from (select c, b, a from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1 where (cbo_t1.c_int + 1 == 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)) cbo_t1 left semi join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2 where (cbo_t2.c_int + 1 == 2) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)) cbo_t2 on cbo_t1.a=p left semi join cbo_t3 on cbo_t1.a=key where (b + 1 == 2) and (b > 0 or c >= 0)) R where (b + 1 = 2) and (R.b > 0 or c >= 0) +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +PREHOOK: Input: default@cbo_t2 +PREHOOK: Input: default@cbo_t2@dt=2014 +PREHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select c, b, a from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1 where (cbo_t1.c_int + 1 == 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)) cbo_t1 left semi join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2 where (cbo_t2.c_int + 1 == 2) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)) cbo_t2 on cbo_t1.a=p left semi join cbo_t3 on cbo_t1.a=key where (b + 1 == 2) and (b > 0 or c >= 0)) R where (b + 1 = 2) and (R.b > 0 or c >= 0) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +POSTHOOK: Input: default@cbo_t2 +POSTHOOK: Input: default@cbo_t2@dt=2014 +POSTHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### +1.0 1 1 +1.0 1 1 +1.0 1 1 +1.0 1 1 +1.0 1 1 +1.0 1 1 +1.0 1 1 +1.0 1 1 +1.0 1 1 +1.0 1 1 +1.0 1 1 +1.0 1 1 +1.0 1 1 +1.0 1 1 +1.0 1 1 +1.0 1 1 +1.0 1 1 +1.0 1 1 +PREHOOK: query: select * from (select cbo_t3.c_int, cbo_t1.c, b from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1 where (cbo_t1.c_int + 1 = 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)) cbo_t1 left semi join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2 where (cbo_t2.c_int + 1 == 2) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)) cbo_t2 on cbo_t1.a=p left outer join 
cbo_t3 on cbo_t1.a=key where (b + cbo_t3.c_int == 2) and (b > 0 or c_int >= 0)) R where (R.c_int + 1 = 2) and (R.b > 0 or c_int >= 0) +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +PREHOOK: Input: default@cbo_t2 +PREHOOK: Input: default@cbo_t2@dt=2014 +PREHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select cbo_t3.c_int, cbo_t1.c, b from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1 where (cbo_t1.c_int + 1 = 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)) cbo_t1 left semi join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2 where (cbo_t2.c_int + 1 == 2) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)) cbo_t2 on cbo_t1.a=p left outer join cbo_t3 on cbo_t1.a=key where (b + cbo_t3.c_int == 2) and (b > 0 or c_int >= 0)) R where (R.c_int + 1 = 2) and (R.b > 0 or c_int >= 0) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +POSTHOOK: Input: default@cbo_t2 +POSTHOOK: Input: default@cbo_t2@dt=2014 +POSTHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +PREHOOK: query: select * from (select c_int, b, cbo_t1.c from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1 where (cbo_t1.c_int + 1 == 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)) cbo_t1 left semi join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2 where (cbo_t2.c_int + 1 == 2) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)) cbo_t2 on cbo_t1.a=p right outer join cbo_t3 on cbo_t1.a=key where (b + 1 == 2) and (b > 0 or c_int >= 0)) R where (c + 1 = 2) and (R.b > 0 or c_int >= 0) +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +PREHOOK: Input: default@cbo_t2 +PREHOOK: Input: default@cbo_t2@dt=2014 +PREHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select c_int, b, cbo_t1.c from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1 where (cbo_t1.c_int + 1 == 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)) cbo_t1 left semi join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2 where (cbo_t2.c_int + 1 == 2) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)) cbo_t2 on cbo_t1.a=p right outer join cbo_t3 on cbo_t1.a=key where (b + 1 == 2) and (b > 0 or c_int >= 0)) R where (c + 1 = 2) and (R.b > 0 or c_int >= 0) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +POSTHOOK: Input: default@cbo_t2 +POSTHOOK: Input: default@cbo_t2@dt=2014 +POSTHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 
1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +PREHOOK: query: select * from (select c_int, b, cbo_t1.c from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1 where (cbo_t1.c_int + 1 == 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)) cbo_t1 left semi join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2 where (cbo_t2.c_int + 1 == 2) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)) cbo_t2 on cbo_t1.a=p full outer join cbo_t3 on cbo_t1.a=key where (b + 1 == 2) and (b > 0 or c_int >= 0)) R where (c + 1 = 2) and (R.b > 0 or c_int >= 0) +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +PREHOOK: Input: default@cbo_t2 +PREHOOK: Input: default@cbo_t2@dt=2014 +PREHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select c_int, b, cbo_t1.c from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1 where (cbo_t1.c_int + 1 == 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)) cbo_t1 left semi join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2 where (cbo_t2.c_int + 1 == 2) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)) cbo_t2 on cbo_t1.a=p full outer join cbo_t3 on cbo_t1.a=key where (b + 1 == 2) and (b > 0 or c_int >= 0)) R where (c + 1 = 2) and (R.b > 0 or c_int >= 0) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +POSTHOOK: Input: default@cbo_t2 +POSTHOOK: Input: default@cbo_t2@dt=2014 +POSTHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +PREHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0) group by c_float, cbo_t1.c_int, key having cbo_t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by a+b desc, c asc) cbo_t1 left semi join (select key as p, c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0) group by c_float, cbo_t2.c_int, key having cbo_t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by q+r/10 desc, p) cbo_t2 on 
cbo_t1.a=p left semi join cbo_t3 on cbo_t1.a=key where (b + 1 >= 0) and (b > 0 or a >= 0) group by a, c having a > 0 and (a >=1 or c >= 1) and (a + c) >= 0 order by c, a +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +PREHOOK: Input: default@cbo_t2 +PREHOOK: Input: default@cbo_t2@dt=2014 +PREHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0) group by c_float, cbo_t1.c_int, key having cbo_t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by a+b desc, c asc) cbo_t1 left semi join (select key as p, c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0) group by c_float, cbo_t2.c_int, key having cbo_t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by q+r/10 desc, p) cbo_t2 on cbo_t1.a=p left semi join cbo_t3 on cbo_t1.a=key where (b + 1 >= 0) and (b > 0 or a >= 0) group by a, c having a > 0 and (a >=1 or c >= 1) and (a + c) >= 0 order by c, a +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +POSTHOOK: Input: default@cbo_t2 +POSTHOOK: Input: default@cbo_t2@dt=2014 +POSTHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### + 1 2 + 1 2 +1 2 +1 12 +PREHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0) group by c_float, cbo_t1.c_int, key having cbo_t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by a+b desc, c asc limit 5) cbo_t1 left semi join (select key as p, c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0) group by c_float, cbo_t2.c_int, key having cbo_t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by q+r/10 desc, p limit 5) cbo_t2 on cbo_t1.a=p left semi join cbo_t3 on cbo_t1.a=key where (b + 1 >= 0) and (b > 0 or a >= 0) group by a, c having a > 0 and (a >=1 or c >= 1) and (a + c) >= 0 order by c, a +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +PREHOOK: Input: default@cbo_t2 +PREHOOK: Input: default@cbo_t2@dt=2014 +PREHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0) group by c_float, cbo_t1.c_int, key having cbo_t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by a+b desc, c asc limit 5) cbo_t1 left semi join (select key as p, c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0) group by c_float, cbo_t2.c_int, key having cbo_t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by q+r/10 desc, p limit 5) cbo_t2 on cbo_t1.a=p left semi join cbo_t3 on cbo_t1.a=key where (b + 1 >= 0) and (b > 0 or a >= 0) group by a, c having a > 0 and (a >=1 or c >= 1) and (a + c) >= 0 order by c, a +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +POSTHOOK: Input: default@cbo_t2 +POSTHOOK: Input: default@cbo_t2@dt=2014 +POSTHOOK: Input: default@cbo_t3 +#### A masked 
pattern was here #### + 1 2 + 1 2 +1 2 +1 12 Index: ql/src/test/results/clientpositive/tez/cbo_simple_select.q.out =================================================================== --- ql/src/test/results/clientpositive/tez/cbo_simple_select.q.out (revision 0) +++ ql/src/test/results/clientpositive/tez/cbo_simple_select.q.out (working copy) @@ -0,0 +1,745 @@ +PREHOOK: query: -- 1. Test Select + TS +select * from cbo_t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: -- 1. Test Select + TS +select * from cbo_t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 false 2014 +1 1 1 1.0 false 2014 +null null NULL NULL NULL 2014 +null null NULL NULL NULL 2014 +PREHOOK: query: select * from cbo_t1 as cbo_t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select * from cbo_t1 as cbo_t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 false 2014 +1 1 1 1.0 false 2014 +null null NULL NULL NULL 2014 +null null NULL NULL NULL 2014 +PREHOOK: query: select * from cbo_t1 as cbo_t2 +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select * from cbo_t1 as cbo_t2 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 false 2014 +1 1 1 1.0 false 2014 +null null NULL NULL NULL 2014 +null null NULL NULL NULL 2014 +PREHOOK: query: select cbo_t1.key as x, c_int as c_int, (((c_int+c_float)*10)+5) as y from cbo_t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select cbo_t1.key as x, c_int as c_int, (((c_int+c_float)*10)+5) as y from cbo_t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 + 1 1 25.0 + 1 1 25.0 + 1 1 25.0 + 1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +null NULL NULL +null NULL NULL +PREHOOK: query: -- 2. 
Test Select + TS + FIL +select * from cbo_t1 where cbo_t1.c_int >= 0 +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: -- 2. Test Select + TS + FIL +select * from cbo_t1 where cbo_t1.c_int >= 0 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 false 2014 +1 1 1 1.0 false 2014 +PREHOOK: query: select * from cbo_t1 as cbo_t1 where cbo_t1.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100 +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select * from cbo_t1 as cbo_t1 where cbo_t1.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 false 2014 +1 1 1 1.0 false 2014 +PREHOOK: query: select * from cbo_t1 as cbo_t2 where cbo_t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100 +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select * from cbo_t1 as cbo_t2 where cbo_t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 false 2014 +1 1 1 1.0 false 2014 +PREHOOK: query: select cbo_t2.key as x, c_int as c_int, (((c_int+c_float)*10)+5) as y from cbo_t1 as cbo_t2 where cbo_t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100 +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select cbo_t2.key as x, c_int as c_int, (((c_int+c_float)*10)+5) as y from cbo_t1 as cbo_t2 where cbo_t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 + 1 1 25.0 + 1 1 25.0 + 1 1 25.0 + 1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +PREHOOK: query: -- 3 Test Select + Select + TS + FIL +select * from (select * from cbo_t1 where cbo_t1.c_int >= 0) as cbo_t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: 
Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: -- 3 Test Select + Select + TS + FIL +select * from (select * from cbo_t1 where cbo_t1.c_int >= 0) as cbo_t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 false 2014 +1 1 1 1.0 false 2014 +PREHOOK: query: select * from (select * from cbo_t1 as cbo_t1 where cbo_t1.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as cbo_t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select * from cbo_t1 as cbo_t1 where cbo_t1.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as cbo_t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 false 2014 +1 1 1 1.0 false 2014 +PREHOOK: query: select * from (select * from cbo_t1 as cbo_t2 where cbo_t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as cbo_t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select * from cbo_t1 as cbo_t2 where cbo_t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as cbo_t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 false 2014 +1 1 1 1.0 false 2014 +PREHOOK: query: select * from (select cbo_t2.key as x, c_int as c_int, (((c_int+c_float)*10)+5) as y from cbo_t1 as cbo_t2 where cbo_t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as cbo_t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select cbo_t2.key as x, c_int as c_int, (((c_int+c_float)*10)+5) as y from cbo_t1 as cbo_t2 where cbo_t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as cbo_t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 + 1 1 25.0 + 1 1 25.0 + 1 1 25.0 + 1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +PREHOOK: query: select * from (select * from cbo_t1 where cbo_t1.c_int >= 0) as cbo_t1 where cbo_t1.c_int >= 0 +PREHOOK: type: QUERY 
+PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select * from cbo_t1 where cbo_t1.c_int >= 0) as cbo_t1 where cbo_t1.c_int >= 0 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 false 2014 +1 1 1 1.0 false 2014 +PREHOOK: query: select * from (select * from cbo_t1 as cbo_t1 where cbo_t1.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as cbo_t1 where cbo_t1.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100 +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select * from cbo_t1 as cbo_t1 where cbo_t1.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as cbo_t1 where cbo_t1.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 false 2014 +1 1 1 1.0 false 2014 +PREHOOK: query: select * from (select * from cbo_t1 as cbo_t2 where cbo_t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as cbo_t2 where cbo_t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100 +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select * from cbo_t1 as cbo_t2 where cbo_t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as cbo_t2 where cbo_t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 false 2014 +1 1 1 1.0 false 2014 +PREHOOK: query: select * from (select cbo_t2.key as x, c_int as c_int, (((c_int+c_float)*10)+5) as y from cbo_t1 as cbo_t2 where cbo_t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as cbo_t1 where cbo_t1.c_int >= 0 and y+c_int >= 0 or x <= 100 +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select cbo_t2.key as x, c_int as c_int, (((c_int+c_float)*10)+5) as y from cbo_t1 as cbo_t2 where cbo_t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as cbo_t1 where cbo_t1.c_int >= 0 and y+c_int >= 0 or x <= 100 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 
+POSTHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 + 1 1 25.0 + 1 1 25.0 + 1 1 25.0 + 1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +PREHOOK: query: select cbo_t1.c_int+c_float as x , c_int as c_int, (((c_int+c_float)*10)+5) as y from (select * from cbo_t1 where cbo_t1.c_int >= 0) as cbo_t1 where cbo_t1.c_int >= 0 +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select cbo_t1.c_int+c_float as x , c_int as c_int, (((c_int+c_float)*10)+5) as y from (select * from cbo_t1 where cbo_t1.c_int >= 0) as cbo_t1 where cbo_t1.c_int >= 0 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +PREHOOK: query: select cbo_t2.c_int+c_float as x , c_int as c_int, (((c_int+c_float)*10)+5) as y from (select * from cbo_t1 where cbo_t1.c_int >= 0) as cbo_t2 where cbo_t2.c_int >= 0 +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select cbo_t2.c_int+c_float as x , c_int as c_int, (((c_int+c_float)*10)+5) as y from (select * from cbo_t1 where cbo_t1.c_int >= 0) as cbo_t2 where cbo_t2.c_int >= 0 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +PREHOOK: query: select * from (select * from cbo_t1 where cbo_t1.c_int >= 0) as cbo_t1 where cbo_t1.c_int >= 0 +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select * from cbo_t1 where cbo_t1.c_int >= 0) as cbo_t1 where cbo_t1.c_int >= 0 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 false 2014 +1 1 1 1.0 false 2014 +PREHOOK: query: select * from (select * from cbo_t1 as cbo_t1 where cbo_t1.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as cbo_t1 where cbo_t1.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100 +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select * from cbo_t1 as cbo_t1 where cbo_t1.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as cbo_t1 where cbo_t1.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 
1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 false 2014 +1 1 1 1.0 false 2014 +PREHOOK: query: select * from (select * from cbo_t1 as cbo_t2 where cbo_t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as cbo_t2 where cbo_t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100 +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select * from cbo_t1 as cbo_t2 where cbo_t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as cbo_t2 where cbo_t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 false 2014 +1 1 1 1.0 false 2014 +PREHOOK: query: select * from (select cbo_t2.key as x, c_int as c_int, (((c_int+c_float)*10)+5) as y from cbo_t1 as cbo_t2 where cbo_t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as cbo_t1 where cbo_t1.c_int >= 0 and y+c_int >= 0 or x <= 100 +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select cbo_t2.key as x, c_int as c_int, (((c_int+c_float)*10)+5) as y from cbo_t1 as cbo_t2 where cbo_t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as cbo_t1 where cbo_t1.c_int >= 0 and y+c_int >= 0 or x <= 100 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 + 1 1 25.0 + 1 1 25.0 + 1 1 25.0 + 1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +PREHOOK: query: select cbo_t1.c_int+c_float as x , c_int as c_int, (((c_int+c_float)*10)+5) as y from (select * from cbo_t1 where cbo_t1.c_int >= 0) as cbo_t1 where cbo_t1.c_int >= 0 +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select cbo_t1.c_int+c_float as x , c_int as c_int, (((c_int+c_float)*10)+5) as y from (select * from cbo_t1 where cbo_t1.c_int >= 0) as cbo_t1 where cbo_t1.c_int >= 0 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +PREHOOK: query: select cbo_t2.c_int+c_float as x , c_int as c_int, (((c_int+c_float)*10)+5) as y from (select * from cbo_t1 where cbo_t1.c_int >= 0) as cbo_t2 where cbo_t2.c_int >= 0 +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select 
cbo_t2.c_int+c_float as x , c_int as c_int, (((c_int+c_float)*10)+5) as y from (select * from cbo_t1 where cbo_t1.c_int >= 0) as cbo_t2 where cbo_t2.c_int >= 0 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +PREHOOK: query: -- 13. null expr in select list +select null from cbo_t3 +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### +POSTHOOK: query: -- 13. null expr in select list +select null from cbo_t3 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +PREHOOK: query: -- 14. unary operator +select key from cbo_t1 where c_int = -6 or c_int = +6 +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: -- 14. unary operator +select key from cbo_t1 where c_int = -6 or c_int = +6 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +PREHOOK: query: -- 15. query referencing only partition columns +select count(cbo_t1.dt) from cbo_t1 join cbo_t2 on cbo_t1.dt = cbo_t2.dt where cbo_t1.dt = '2014' +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +PREHOOK: Input: default@cbo_t2 +PREHOOK: Input: default@cbo_t2@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: -- 15. query referencing only partition columns +select count(cbo_t1.dt) from cbo_t1 join cbo_t2 on cbo_t1.dt = cbo_t2.dt where cbo_t1.dt = '2014' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +POSTHOOK: Input: default@cbo_t2 +POSTHOOK: Input: default@cbo_t2@dt=2014 +#### A masked pattern was here #### +400 Index: ql/src/test/results/clientpositive/tez/cbo_stats.q.out =================================================================== --- ql/src/test/results/clientpositive/tez/cbo_stats.q.out (revision 0) +++ ql/src/test/results/clientpositive/tez/cbo_stats.q.out (working copy) @@ -0,0 +1,14 @@ +PREHOOK: query: -- 20. Test get stats with empty partition list +select cbo_t1.value from cbo_t1 join cbo_t2 on cbo_t1.key = cbo_t2.key where cbo_t1.dt = '10' and cbo_t1.c_boolean = true +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t2 +PREHOOK: Input: default@cbo_t2@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: -- 20. Test get stats with empty partition list +select cbo_t1.value from cbo_t1 join cbo_t2 on cbo_t1.key = cbo_t2.key where cbo_t1.dt = '10' and cbo_t1.c_boolean = true +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t2 +POSTHOOK: Input: default@cbo_t2@dt=2014 +#### A masked pattern was here #### Index: ql/src/test/results/clientpositive/tez/cbo_subq_exists.q.out =================================================================== --- ql/src/test/results/clientpositive/tez/cbo_subq_exists.q.out (revision 0) +++ ql/src/test/results/clientpositive/tez/cbo_subq_exists.q.out (working copy) @@ -0,0 +1,297 @@ +PREHOOK: query: -- 18. 
SubQueries Not Exists +-- distinct, corr +select * +from src_cbo b +where not exists + (select distinct a.key + from src_cbo a + where b.value = a.value and a.value > 'val_2' + ) +PREHOOK: type: QUERY +PREHOOK: Input: default@src_cbo +#### A masked pattern was here #### +POSTHOOK: query: -- 18. SubQueries Not Exists +-- distinct, corr +select * +from src_cbo b +where not exists + (select distinct a.key + from src_cbo a + where b.value = a.value and a.value > 'val_2' + ) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src_cbo +#### A masked pattern was here #### +0 val_0 +0 val_0 +0 val_0 +10 val_10 +100 val_100 +100 val_100 +103 val_103 +103 val_103 +104 val_104 +104 val_104 +105 val_105 +11 val_11 +111 val_111 +113 val_113 +113 val_113 +114 val_114 +116 val_116 +118 val_118 +118 val_118 +119 val_119 +119 val_119 +119 val_119 +12 val_12 +12 val_12 +120 val_120 +120 val_120 +125 val_125 +125 val_125 +126 val_126 +128 val_128 +128 val_128 +128 val_128 +129 val_129 +129 val_129 +131 val_131 +133 val_133 +134 val_134 +134 val_134 +136 val_136 +137 val_137 +137 val_137 +138 val_138 +138 val_138 +138 val_138 +138 val_138 +143 val_143 +145 val_145 +146 val_146 +146 val_146 +149 val_149 +149 val_149 +15 val_15 +15 val_15 +150 val_150 +152 val_152 +152 val_152 +153 val_153 +155 val_155 +156 val_156 +157 val_157 +158 val_158 +160 val_160 +162 val_162 +163 val_163 +164 val_164 +164 val_164 +165 val_165 +165 val_165 +166 val_166 +167 val_167 +167 val_167 +167 val_167 +168 val_168 +169 val_169 +169 val_169 +169 val_169 +169 val_169 +17 val_17 +170 val_170 +172 val_172 +172 val_172 +174 val_174 +174 val_174 +175 val_175 +175 val_175 +176 val_176 +176 val_176 +177 val_177 +178 val_178 +179 val_179 +179 val_179 +18 val_18 +18 val_18 +180 val_180 +181 val_181 +183 val_183 +186 val_186 +187 val_187 +187 val_187 +187 val_187 +189 val_189 +19 val_19 +190 val_190 +191 val_191 +191 val_191 +192 val_192 +193 val_193 +193 val_193 +193 val_193 +194 val_194 +195 val_195 +195 val_195 +196 val_196 +197 val_197 +197 val_197 +199 val_199 +199 val_199 +199 val_199 +2 val_2 +PREHOOK: query: -- no agg, corr, having +select * +from src_cbo b +group by key, value +having not exists + (select a.key + from src_cbo a + where b.value = a.value and a.key = b.key and a.value > 'val_12' + ) +PREHOOK: type: QUERY +PREHOOK: Input: default@src_cbo +#### A masked pattern was here #### +POSTHOOK: query: -- no agg, corr, having +select * +from src_cbo b +group by key, value +having not exists + (select a.key + from src_cbo a + where b.value = a.value and a.key = b.key and a.value > 'val_12' + ) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src_cbo +#### A masked pattern was here #### +0 val_0 +10 val_10 +100 val_100 +103 val_103 +104 val_104 +105 val_105 +11 val_11 +111 val_111 +113 val_113 +114 val_114 +116 val_116 +118 val_118 +119 val_119 +12 val_12 +PREHOOK: query: -- 19. SubQueries Exists +-- view test +create view cv1 as +select * +from src_cbo b +where exists + (select a.key + from src_cbo a + where b.value = a.value and a.key = b.key and a.value > 'val_9') +PREHOOK: type: CREATEVIEW +PREHOOK: Input: default@src_cbo +PREHOOK: Output: database:default +PREHOOK: Output: default@cv1 +POSTHOOK: query: -- 19. 
SubQueries Exists +-- view test +create view cv1 as +select * +from src_cbo b +where exists + (select a.key + from src_cbo a + where b.value = a.value and a.key = b.key and a.value > 'val_9') +POSTHOOK: type: CREATEVIEW +POSTHOOK: Input: default@src_cbo +POSTHOOK: Output: database:default +POSTHOOK: Output: default@cv1 +PREHOOK: query: select * from cv1 +PREHOOK: type: QUERY +PREHOOK: Input: default@cv1 +PREHOOK: Input: default@src_cbo +#### A masked pattern was here #### +POSTHOOK: query: select * from cv1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cv1 +POSTHOOK: Input: default@src_cbo +#### A masked pattern was here #### +90 val_90 +90 val_90 +90 val_90 +92 val_92 +95 val_95 +95 val_95 +96 val_96 +97 val_97 +97 val_97 +98 val_98 +98 val_98 +PREHOOK: query: -- sq in from +select * +from (select * + from src_cbo b + where exists + (select a.key + from src_cbo a + where b.value = a.value and a.key = b.key and a.value > 'val_9') + ) a +PREHOOK: type: QUERY +PREHOOK: Input: default@src_cbo +#### A masked pattern was here #### +POSTHOOK: query: -- sq in from +select * +from (select * + from src_cbo b + where exists + (select a.key + from src_cbo a + where b.value = a.value and a.key = b.key and a.value > 'val_9') + ) a +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src_cbo +#### A masked pattern was here #### +90 val_90 +90 val_90 +90 val_90 +92 val_92 +95 val_95 +95 val_95 +96 val_96 +97 val_97 +97 val_97 +98 val_98 +98 val_98 +PREHOOK: query: -- sq in from, having +select * +from (select b.key, count(*) + from src_cbo b + group by b.key + having exists + (select a.key + from src_cbo a + where a.key = b.key and a.value > 'val_9' + ) +) a +PREHOOK: type: QUERY +PREHOOK: Input: default@src_cbo +#### A masked pattern was here #### +POSTHOOK: query: -- sq in from, having +select * +from (select b.key, count(*) + from src_cbo b + group by b.key + having exists + (select a.key + from src_cbo a + where a.key = b.key and a.value > 'val_9' + ) +) a +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src_cbo +#### A masked pattern was here #### +90 3 +92 1 +95 2 +96 1 +97 2 +98 2 Index: ql/src/test/results/clientpositive/tez/cbo_subq_in.q.out =================================================================== --- ql/src/test/results/clientpositive/tez/cbo_subq_in.q.out (revision 0) +++ ql/src/test/results/clientpositive/tez/cbo_subq_in.q.out (working copy) @@ -0,0 +1,149 @@ +PREHOOK: query: -- 17. SubQueries In +-- non agg, non corr +select * +from src_cbo +where src_cbo.key in (select key from src_cbo s1 where s1.key > '9') +PREHOOK: type: QUERY +PREHOOK: Input: default@src_cbo +#### A masked pattern was here #### +POSTHOOK: query: -- 17. 
SubQueries In +-- non agg, non corr +select * +from src_cbo +where src_cbo.key in (select key from src_cbo s1 where s1.key > '9') +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src_cbo +#### A masked pattern was here #### +90 val_90 +90 val_90 +90 val_90 +92 val_92 +95 val_95 +95 val_95 +96 val_96 +97 val_97 +97 val_97 +98 val_98 +98 val_98 +PREHOOK: query: -- agg, corr +-- add back once rank issue fixed for cbo + +-- distinct, corr +select * +from src_cbo b +where b.key in + (select distinct a.key + from src_cbo a + where b.value = a.value and a.key > '9' + ) +PREHOOK: type: QUERY +PREHOOK: Input: default@src_cbo +#### A masked pattern was here #### +POSTHOOK: query: -- agg, corr +-- add back once rank issue fixed for cbo + +-- distinct, corr +select * +from src_cbo b +where b.key in + (select distinct a.key + from src_cbo a + where b.value = a.value and a.key > '9' + ) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src_cbo +#### A masked pattern was here #### +90 val_90 +90 val_90 +90 val_90 +92 val_92 +95 val_95 +95 val_95 +96 val_96 +97 val_97 +97 val_97 +98 val_98 +98 val_98 +PREHOOK: query: -- non agg, corr, with join in Parent Query +select p.p_partkey, li.l_suppkey +from (select distinct l_partkey as p_partkey from lineitem) p join lineitem li on p.p_partkey = li.l_partkey +where li.l_linenumber = 1 and + li.l_orderkey in (select l_orderkey from lineitem where l_shipmode = 'AIR' and l_linenumber = li.l_linenumber) +PREHOOK: type: QUERY +PREHOOK: Input: default@lineitem +#### A masked pattern was here #### +POSTHOOK: query: -- non agg, corr, with join in Parent Query +select p.p_partkey, li.l_suppkey +from (select distinct l_partkey as p_partkey from lineitem) p join lineitem li on p.p_partkey = li.l_partkey +where li.l_linenumber = 1 and + li.l_orderkey in (select l_orderkey from lineitem where l_shipmode = 'AIR' and l_linenumber = li.l_linenumber) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@lineitem +#### A masked pattern was here #### +4297 1798 +108570 8571 +PREHOOK: query: -- where and having +-- Plan is: +-- Stage 1: b semijoin sq1:src_cbo (subquery in where) +-- Stage 2: group by Stage 1 o/p +-- Stage 5: group by on sq2:src_cbo (subquery in having) +-- Stage 6: Stage 2 o/p semijoin Stage 5 +select key, value, count(*) +from src_cbo b +where b.key in (select key from src_cbo where src_cbo.key > '8') +group by key, value +having count(*) in (select count(*) from src_cbo s1 where s1.key > '9' group by s1.key ) +PREHOOK: type: QUERY +PREHOOK: Input: default@src_cbo +#### A masked pattern was here #### +POSTHOOK: query: -- where and having +-- Plan is: +-- Stage 1: b semijoin sq1:src_cbo (subquery in where) +-- Stage 2: group by Stage 1 o/p +-- Stage 5: group by on sq2:src_cbo (subquery in having) +-- Stage 6: Stage 2 o/p semijoin Stage 5 +select key, value, count(*) +from src_cbo b +where b.key in (select key from src_cbo where src_cbo.key > '8') +group by key, value +having count(*) in (select count(*) from src_cbo s1 where s1.key > '9' group by s1.key ) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src_cbo +#### A masked pattern was here #### +80 val_80 1 +96 val_96 1 +92 val_92 1 +9 val_9 1 +87 val_87 1 +86 val_86 1 +85 val_85 1 +82 val_82 1 +84 val_84 2 +95 val_95 2 +83 val_83 2 +98 val_98 2 +97 val_97 2 +90 val_90 3 +PREHOOK: query: -- non agg, non corr, windowing +select p_mfgr, p_name, avg(p_size) +from part +group by p_mfgr, p_name +having p_name in + (select first_value(p_name) over(partition by p_mfgr order by p_size) from part) +PREHOOK: type: QUERY 
+PREHOOK: Input: default@part +#### A masked pattern was here #### +POSTHOOK: query: -- non agg, non corr, windowing +select p_mfgr, p_name, avg(p_size) +from part +group by p_mfgr, p_name +having p_name in + (select first_value(p_name) over(partition by p_mfgr order by p_size) from part) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@part +#### A masked pattern was here #### +Manufacturer#1 almond antique burnished rose metallic 2.0 +Manufacturer#3 almond antique misty red olive 1.0 +Manufacturer#5 almond antique sky peru orange 2.0 +Manufacturer#2 almond aquamarine midnight light salmon 2.0 +Manufacturer#4 almond aquamarine yellow dodger mint 7.0 Index: ql/src/test/results/clientpositive/tez/cbo_subq_not_in.q.out =================================================================== --- ql/src/test/results/clientpositive/tez/cbo_subq_not_in.q.out (revision 0) +++ ql/src/test/results/clientpositive/tez/cbo_subq_not_in.q.out (working copy) @@ -0,0 +1,365 @@ +PREHOOK: query: -- 16. SubQueries Not In +-- non agg, non corr +select * +from src_cbo +where src_cbo.key not in + ( select key from src_cbo s1 + where s1.key > '2' + ) order by key +PREHOOK: type: QUERY +PREHOOK: Input: default@src_cbo +#### A masked pattern was here #### +POSTHOOK: query: -- 16. SubQueries Not In +-- non agg, non corr +select * +from src_cbo +where src_cbo.key not in + ( select key from src_cbo s1 + where s1.key > '2' + ) order by key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src_cbo +#### A masked pattern was here #### +0 val_0 +0 val_0 +0 val_0 +10 val_10 +100 val_100 +100 val_100 +103 val_103 +103 val_103 +104 val_104 +104 val_104 +105 val_105 +11 val_11 +111 val_111 +113 val_113 +113 val_113 +114 val_114 +116 val_116 +118 val_118 +118 val_118 +119 val_119 +119 val_119 +119 val_119 +12 val_12 +12 val_12 +120 val_120 +120 val_120 +125 val_125 +125 val_125 +126 val_126 +128 val_128 +128 val_128 +128 val_128 +129 val_129 +129 val_129 +131 val_131 +133 val_133 +134 val_134 +134 val_134 +136 val_136 +137 val_137 +137 val_137 +138 val_138 +138 val_138 +138 val_138 +138 val_138 +143 val_143 +145 val_145 +146 val_146 +146 val_146 +149 val_149 +149 val_149 +15 val_15 +15 val_15 +150 val_150 +152 val_152 +152 val_152 +153 val_153 +155 val_155 +156 val_156 +157 val_157 +158 val_158 +160 val_160 +162 val_162 +163 val_163 +164 val_164 +164 val_164 +165 val_165 +165 val_165 +166 val_166 +167 val_167 +167 val_167 +167 val_167 +168 val_168 +169 val_169 +169 val_169 +169 val_169 +169 val_169 +17 val_17 +170 val_170 +172 val_172 +172 val_172 +174 val_174 +174 val_174 +175 val_175 +175 val_175 +176 val_176 +176 val_176 +177 val_177 +178 val_178 +179 val_179 +179 val_179 +18 val_18 +18 val_18 +180 val_180 +181 val_181 +183 val_183 +186 val_186 +187 val_187 +187 val_187 +187 val_187 +189 val_189 +19 val_19 +190 val_190 +191 val_191 +191 val_191 +192 val_192 +193 val_193 +193 val_193 +193 val_193 +194 val_194 +195 val_195 +195 val_195 +196 val_196 +197 val_197 +197 val_197 +199 val_199 +199 val_199 +199 val_199 +2 val_2 +PREHOOK: query: -- non agg, corr +select p_mfgr, b.p_name, p_size +from part b +where b.p_name not in + (select p_name + from (select p_mfgr, p_name, p_size as r from part) a + where r < 10 and b.p_mfgr = a.p_mfgr + ) +PREHOOK: type: QUERY +PREHOOK: Input: default@part +#### A masked pattern was here #### +POSTHOOK: query: -- non agg, corr +select p_mfgr, b.p_name, p_size +from part b +where b.p_name not in + (select p_name + from (select p_mfgr, p_name, p_size as r from part) a + where r < 10 and b.p_mfgr = 
a.p_mfgr + ) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@part +#### A masked pattern was here #### +Manufacturer#5 almond antique blue firebrick mint 31 +Manufacturer#3 almond antique chartreuse khaki white 17 +Manufacturer#1 almond antique chartreuse lavender yellow 34 +Manufacturer#3 almond antique forest lavender goldenrod 14 +Manufacturer#4 almond antique gainsboro frosted violet 10 +Manufacturer#3 almond antique metallic orange dim 19 +Manufacturer#3 almond antique olive coral navajo 45 +Manufacturer#2 almond antique violet chocolate turquoise 14 +Manufacturer#4 almond antique violet mint lemon 39 +Manufacturer#2 almond antique violet turquoise frosted 40 +Manufacturer#1 almond aquamarine burnished black steel 28 +Manufacturer#5 almond aquamarine dodger light gainsboro 46 +Manufacturer#4 almond aquamarine floral ivory bisque 27 +Manufacturer#1 almond aquamarine pink moccasin thistle 42 +Manufacturer#2 almond aquamarine rose maroon antique 25 +Manufacturer#2 almond aquamarine sandy cyan gainsboro 18 +Manufacturer#4 almond azure aquamarine papaya violet 12 +Manufacturer#5 almond azure blanched chiffon midnight 23 +PREHOOK: query: -- agg, non corr +select p_name, p_size +from +part where part.p_size not in + (select avg(p_size) + from (select p_size from part) a + where p_size < 10 + ) order by p_name +PREHOOK: type: QUERY +PREHOOK: Input: default@part +#### A masked pattern was here #### +POSTHOOK: query: -- agg, non corr +select p_name, p_size +from +part where part.p_size not in + (select avg(p_size) + from (select p_size from part) a + where p_size < 10 + ) order by p_name +POSTHOOK: type: QUERY +POSTHOOK: Input: default@part +#### A masked pattern was here #### +almond antique blue firebrick mint 31 +almond antique burnished rose metallic 2 +almond antique burnished rose metallic 2 +almond antique chartreuse khaki white 17 +almond antique chartreuse lavender yellow 34 +almond antique forest lavender goldenrod 14 +almond antique gainsboro frosted violet 10 +almond antique medium spring khaki 6 +almond antique metallic orange dim 19 +almond antique misty red olive 1 +almond antique olive coral navajo 45 +almond antique salmon chartreuse burlywood 6 +almond antique sky peru orange 2 +almond antique violet chocolate turquoise 14 +almond antique violet mint lemon 39 +almond antique violet turquoise frosted 40 +almond aquamarine burnished black steel 28 +almond aquamarine dodger light gainsboro 46 +almond aquamarine floral ivory bisque 27 +almond aquamarine midnight light salmon 2 +almond aquamarine pink moccasin thistle 42 +almond aquamarine rose maroon antique 25 +almond aquamarine sandy cyan gainsboro 18 +almond aquamarine yellow dodger mint 7 +almond azure aquamarine papaya violet 12 +almond azure blanched chiffon midnight 23 +PREHOOK: query: -- agg, corr +select p_mfgr, p_name, p_size +from part b where b.p_size not in + (select min(p_size) + from (select p_mfgr, p_size from part) a + where p_size < 10 and b.p_mfgr = a.p_mfgr + ) order by p_name +PREHOOK: type: QUERY +PREHOOK: Input: default@part +#### A masked pattern was here #### +POSTHOOK: query: -- agg, corr +select p_mfgr, p_name, p_size +from part b where b.p_size not in + (select min(p_size) + from (select p_mfgr, p_size from part) a + where p_size < 10 and b.p_mfgr = a.p_mfgr + ) order by p_name +POSTHOOK: type: QUERY +POSTHOOK: Input: default@part +#### A masked pattern was here #### +Manufacturer#5 almond antique blue firebrick mint 31 +Manufacturer#3 almond antique chartreuse khaki white 17 +Manufacturer#1 almond 
antique chartreuse lavender yellow 34 +Manufacturer#3 almond antique forest lavender goldenrod 14 +Manufacturer#4 almond antique gainsboro frosted violet 10 +Manufacturer#5 almond antique medium spring khaki 6 +Manufacturer#3 almond antique metallic orange dim 19 +Manufacturer#3 almond antique olive coral navajo 45 +Manufacturer#1 almond antique salmon chartreuse burlywood 6 +Manufacturer#2 almond antique violet chocolate turquoise 14 +Manufacturer#4 almond antique violet mint lemon 39 +Manufacturer#2 almond antique violet turquoise frosted 40 +Manufacturer#1 almond aquamarine burnished black steel 28 +Manufacturer#5 almond aquamarine dodger light gainsboro 46 +Manufacturer#4 almond aquamarine floral ivory bisque 27 +Manufacturer#1 almond aquamarine pink moccasin thistle 42 +Manufacturer#2 almond aquamarine rose maroon antique 25 +Manufacturer#2 almond aquamarine sandy cyan gainsboro 18 +Manufacturer#4 almond azure aquamarine papaya violet 12 +Manufacturer#5 almond azure blanched chiffon midnight 23 +PREHOOK: query: -- non agg, non corr, Group By in Parent Query +select li.l_partkey, count(*) +from lineitem li +where li.l_linenumber = 1 and + li.l_orderkey not in (select l_orderkey from lineitem where l_shipmode = 'AIR') +group by li.l_partkey +PREHOOK: type: QUERY +PREHOOK: Input: default@lineitem +#### A masked pattern was here #### +POSTHOOK: query: -- non agg, non corr, Group By in Parent Query +select li.l_partkey, count(*) +from lineitem li +where li.l_linenumber = 1 and + li.l_orderkey not in (select l_orderkey from lineitem where l_shipmode = 'AIR') +group by li.l_partkey +POSTHOOK: type: QUERY +POSTHOOK: Input: default@lineitem +#### A masked pattern was here #### +450 1 +7068 1 +21636 1 +22630 1 +59694 1 +61931 1 +85951 1 +88035 1 +88362 1 +106170 1 +119477 1 +119767 1 +123076 1 +139636 1 +175839 1 +182052 1 +PREHOOK: query: -- add null check test from sq_notin.q once HIVE-7721 resolved. + +-- non agg, corr, having +select b.p_mfgr, min(p_retailprice) +from part b +group by b.p_mfgr +having b.p_mfgr not in + (select p_mfgr + from (select p_mfgr, min(p_retailprice) l, max(p_retailprice) r, avg(p_retailprice) a from part group by p_mfgr) a + where min(p_retailprice) = l and r - l > 600 + ) + order by b.p_mfgr +PREHOOK: type: QUERY +PREHOOK: Input: default@part +#### A masked pattern was here #### +POSTHOOK: query: -- add null check test from sq_notin.q once HIVE-7721 resolved. 
+ +-- non agg, corr, having +select b.p_mfgr, min(p_retailprice) +from part b +group by b.p_mfgr +having b.p_mfgr not in + (select p_mfgr + from (select p_mfgr, min(p_retailprice) l, max(p_retailprice) r, avg(p_retailprice) a from part group by p_mfgr) a + where min(p_retailprice) = l and r - l > 600 + ) + order by b.p_mfgr +POSTHOOK: type: QUERY +POSTHOOK: Input: default@part +#### A masked pattern was here #### +Manufacturer#1 1173.15 +Manufacturer#2 1690.68 +PREHOOK: query: -- agg, non corr, having +select b.p_mfgr, min(p_retailprice) +from part b +group by b.p_mfgr +having b.p_mfgr not in + (select p_mfgr + from part a + group by p_mfgr + having max(p_retailprice) - min(p_retailprice) > 600 + ) + order by b.p_mfgr +PREHOOK: type: QUERY +PREHOOK: Input: default@part +#### A masked pattern was here #### +POSTHOOK: query: -- agg, non corr, having +select b.p_mfgr, min(p_retailprice) +from part b +group by b.p_mfgr +having b.p_mfgr not in + (select p_mfgr + from part a + group by p_mfgr + having max(p_retailprice) - min(p_retailprice) > 600 + ) + order by b.p_mfgr +POSTHOOK: type: QUERY +POSTHOOK: Input: default@part +#### A masked pattern was here #### +Manufacturer#1 1173.15 +Manufacturer#2 1690.68 Index: ql/src/test/results/clientpositive/tez/cbo_udf_udaf.q.out =================================================================== --- ql/src/test/results/clientpositive/tez/cbo_udf_udaf.q.out (revision 0) +++ ql/src/test/results/clientpositive/tez/cbo_udf_udaf.q.out (working copy) @@ -0,0 +1,121 @@ +PREHOOK: query: -- 8. Test UDF/UDAF +select count(*), count(c_int), sum(c_int), avg(c_int), max(c_int), min(c_int) from cbo_t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: -- 8. 
Test UDF/UDAF +select count(*), count(c_int), sum(c_int), avg(c_int), max(c_int), min(c_int) from cbo_t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +20 18 18 1.0 1 1 +PREHOOK: query: select count(*), count(c_int), sum(c_int), avg(c_int), max(c_int), min(c_int), case c_int when 0 then 1 when 1 then 2 else 3 end, sum(case c_int when 0 then 1 when 1 then 2 else 3 end) from cbo_t1 group by c_int +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select count(*), count(c_int), sum(c_int), avg(c_int), max(c_int), min(c_int), case c_int when 0 then 1 when 1 then 2 else 3 end, sum(case c_int when 0 then 1 when 1 then 2 else 3 end) from cbo_t1 group by c_int +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +2 0 NULL NULL NULL NULL 3 6 +18 18 18 1.0 1 1 2 36 +PREHOOK: query: select * from (select count(*) as a, count(distinct c_int) as b, sum(c_int) as c, avg(c_int) as d, max(c_int) as e, min(c_int) as f from cbo_t1) cbo_t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select count(*) as a, count(distinct c_int) as b, sum(c_int) as c, avg(c_int) as d, max(c_int) as e, min(c_int) as f from cbo_t1) cbo_t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +20 1 18 1.0 1 1 +PREHOOK: query: select * from (select count(*) as a, count(distinct c_int) as b, sum(c_int) as c, avg(c_int) as d, max(c_int) as e, min(c_int) as f, case c_int when 0 then 1 when 1 then 2 else 3 end as g, sum(case c_int when 0 then 1 when 1 then 2 else 3 end) as h from cbo_t1 group by c_int) cbo_t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select count(*) as a, count(distinct c_int) as b, sum(c_int) as c, avg(c_int) as d, max(c_int) as e, min(c_int) as f, case c_int when 0 then 1 when 1 then 2 else 3 end as g, sum(case c_int when 0 then 1 when 1 then 2 else 3 end) as h from cbo_t1 group by c_int) cbo_t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +2 0 NULL NULL NULL NULL 3 6 +18 1 18 1.0 1 1 2 36 +PREHOOK: query: select f,a,e,b from (select count(*) as a, count(c_int) as b, sum(c_int) as c, avg(c_int) as d, max(c_int) as e, min(c_int) as f from cbo_t1) cbo_t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select f,a,e,b from (select count(*) as a, count(c_int) as b, sum(c_int) as c, avg(c_int) as d, max(c_int) as e, min(c_int) as f from cbo_t1) cbo_t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +1 20 1 18 +PREHOOK: query: select f,a,e,b from (select count(*) as a, count(distinct c_int) as b, sum(distinct c_int) as c, avg(distinct c_int) as d, max(distinct c_int) as e, min(distinct c_int) as f from cbo_t1) cbo_t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select 
f,a,e,b from (select count(*) as a, count(distinct c_int) as b, sum(distinct c_int) as c, avg(distinct c_int) as d, max(distinct c_int) as e, min(distinct c_int) as f from cbo_t1) cbo_t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +1 20 1 1 +PREHOOK: query: select count(c_int) as a, avg(c_float), key from cbo_t1 group by key +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select count(c_int) as a, avg(c_float), key from cbo_t1 group by key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +2 1.0 1 +2 1.0 1 +12 1.0 1 +2 1.0 1 +0 NULL null +PREHOOK: query: select count(distinct c_int) as a, avg(c_float) from cbo_t1 group by c_float +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select count(distinct c_int) as a, avg(c_float) from cbo_t1 group by c_float +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +0 NULL +1 1.0 +PREHOOK: query: select count(distinct c_int) as a, avg(c_float) from cbo_t1 group by c_int +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select count(distinct c_int) as a, avg(c_float) from cbo_t1 group by c_int +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +0 NULL +1 1.0 +PREHOOK: query: select count(distinct c_int) as a, avg(c_float) from cbo_t1 group by c_float, c_int +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select count(distinct c_int) as a, avg(c_float) from cbo_t1 group by c_float, c_int +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +0 NULL +1 1.0 Index: ql/src/test/results/clientpositive/tez/cbo_union.q.out =================================================================== --- ql/src/test/results/clientpositive/tez/cbo_union.q.out (revision 0) +++ ql/src/test/results/clientpositive/tez/cbo_union.q.out (working copy) @@ -0,0 +1,916 @@ +PREHOOK: query: -- 11. Union All +select * from cbo_t1 order by key, c_boolean, value, dt union all select * from cbo_t2 order by key, c_boolean, value, dt +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +PREHOOK: Input: default@cbo_t2 +PREHOOK: Input: default@cbo_t2@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: -- 11. 
Union All +select * from cbo_t1 order by key, c_boolean, value, dt union all select * from cbo_t2 order by key, c_boolean, value, dt +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +POSTHOOK: Input: default@cbo_t2 +POSTHOOK: Input: default@cbo_t2@dt=2014 +#### A masked pattern was here #### + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 +1 1 1 1.0 false 2014 +1 1 1 1.0 false 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +2 2 2 2.0 true 2014 +2 2 2 2.0 true 2014 +2 2 2 2.0 true 2014 +2 2 2 2.0 true 2014 +2 2 2 2.0 true 2014 +null null NULL NULL NULL 2014 +null null NULL NULL NULL 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 +1 1 1 1.0 false 2014 +1 1 1 1.0 false 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +null null NULL NULL NULL 2014 +null null NULL NULL NULL 2014 +PREHOOK: query: select key from (select key, c_int from (select * from cbo_t1 union all select * from cbo_t2 where cbo_t2.key >=0)r1 union all select key, c_int from cbo_t3)r2 where key >=0 order by key +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +PREHOOK: Input: default@cbo_t2 +PREHOOK: Input: default@cbo_t2@dt=2014 +PREHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### +POSTHOOK: query: select key from (select key, c_int from (select * from cbo_t1 union all select * from cbo_t2 where cbo_t2.key >=0)r1 union all select key, c_int from cbo_t3)r2 where key >=0 order by key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +POSTHOOK: Input: default@cbo_t2 +POSTHOOK: Input: default@cbo_t2@dt=2014 +POSTHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +2 +2 +2 +2 +2 +2 +2 +3 +3 +3 +PREHOOK: query: select r2.key from (select key, c_int from (select key, c_int from cbo_t1 union all select key, c_int from cbo_t3 )r1 union all select key, c_int from cbo_t3)r2 join (select key, c_int from (select * from cbo_t1 union all select * from cbo_t2 where cbo_t2.key >=0)r1 union all select key, c_int from cbo_t3)r3 on r2.key=r3.key where r3.key >=0 order by r2.key +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +PREHOOK: Input: default@cbo_t2 +PREHOOK: Input: default@cbo_t2@dt=2014 +PREHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### +POSTHOOK: query: select r2.key from (select key, c_int from (select key, c_int from cbo_t1 union all select key, c_int from cbo_t3 )r1 union all select key, c_int from cbo_t3)r2 join (select key, c_int from (select * from cbo_t1 union all select * from cbo_t2 where cbo_t2.key >=0)r1 union all select key, c_int from cbo_t3)r3 on r2.key=r3.key where r3.key >=0 order by r2.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +POSTHOOK: Input: default@cbo_t2 +POSTHOOK: Input: default@cbo_t2@dt=2014 +POSTHOOK: Input: default@cbo_t3 +#### A masked pattern was here #### + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 
+ 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 Index: ql/src/test/results/clientpositive/tez/cbo_views.q.out =================================================================== --- ql/src/test/results/clientpositive/tez/cbo_views.q.out (revision 0) +++ ql/src/test/results/clientpositive/tez/cbo_views.q.out (working copy) @@ -0,0 +1,237 @@ +PREHOOK: query: -- 10. Test views +create view v1 as select c_int, value, c_boolean, dt from cbo_t1 +PREHOOK: type: CREATEVIEW +PREHOOK: Input: default@cbo_t1 +PREHOOK: Output: database:default +PREHOOK: Output: default@v1 +POSTHOOK: query: -- 10. 
Test views
+create view v1 as select c_int, value, c_boolean, dt from cbo_t1
+POSTHOOK: type: CREATEVIEW
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@v1
+PREHOOK: query: create view v2 as select c_int, value from cbo_t2
+PREHOOK: type: CREATEVIEW
+PREHOOK: Input: default@cbo_t2
+PREHOOK: Output: database:default
+PREHOOK: Output: default@v2
+POSTHOOK: query: create view v2 as select c_int, value from cbo_t2
+POSTHOOK: type: CREATEVIEW
+POSTHOOK: Input: default@cbo_t2
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@v2
+PREHOOK: query: select value from v1 where c_boolean=false
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+PREHOOK: Input: default@v1
+#### A masked pattern was here ####
+POSTHOOK: query: select value from v1 where c_boolean=false
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+POSTHOOK: Input: default@v1
+#### A masked pattern was here ####
+1
+1
+PREHOOK: query: select max(c_int) from v1 group by (c_boolean)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+PREHOOK: Input: default@v1
+#### A masked pattern was here ####
+POSTHOOK: query: select max(c_int) from v1 group by (c_boolean)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+POSTHOOK: Input: default@v1
+#### A masked pattern was here ####
+NULL
+1
+1
+PREHOOK: query: select count(v1.c_int) from v1 join cbo_t2 on v1.c_int = cbo_t2.c_int
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+PREHOOK: Input: default@cbo_t2
+PREHOOK: Input: default@cbo_t2@dt=2014
+PREHOOK: Input: default@v1
+#### A masked pattern was here ####
+POSTHOOK: query: select count(v1.c_int) from v1 join cbo_t2 on v1.c_int = cbo_t2.c_int
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+POSTHOOK: Input: default@cbo_t2
+POSTHOOK: Input: default@cbo_t2@dt=2014
+POSTHOOK: Input: default@v1
+#### A masked pattern was here ####
+234
+PREHOOK: query: select count(v1.c_int) from v1 join v2 on v1.c_int = v2.c_int
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+PREHOOK: Input: default@cbo_t2
+PREHOOK: Input: default@cbo_t2@dt=2014
+PREHOOK: Input: default@v1
+PREHOOK: Input: default@v2
+#### A masked pattern was here ####
+POSTHOOK: query: select count(v1.c_int) from v1 join v2 on v1.c_int = v2.c_int
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+POSTHOOK: Input: default@cbo_t2
+POSTHOOK: Input: default@cbo_t2@dt=2014
+POSTHOOK: Input: default@v1
+POSTHOOK: Input: default@v2
+#### A masked pattern was here ####
+234
+PREHOOK: query: select count(*) from v1 a join v1 b on a.value = b.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+PREHOOK: Input: default@v1
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from v1 a join v1 b on a.value = b.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+POSTHOOK: Input: default@v1
+#### A masked pattern was here ####
+160
+PREHOOK: query: create view v3 as select v1.value val from v1 join cbo_t1 on v1.c_boolean = cbo_t1.c_boolean
+PREHOOK: type: CREATEVIEW
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@v1
+PREHOOK: Output: database:default
+PREHOOK: Output: default@v3
+POSTHOOK: query: create view v3 as select v1.value val from v1 join cbo_t1 on v1.c_boolean = cbo_t1.c_boolean
+POSTHOOK: type: CREATEVIEW
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@v1
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@v3
+PREHOOK: query: select count(val) from v3 where val != '1'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+PREHOOK: Input: default@v1
+PREHOOK: Input: default@v3
+#### A masked pattern was here ####
+POSTHOOK: query: select count(val) from v3 where val != '1'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+POSTHOOK: Input: default@v1
+POSTHOOK: Input: default@v3
+#### A masked pattern was here ####
+96
+PREHOOK: query: with q1 as ( select key from cbo_t1 where key = '1')
+select count(*) from q1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: with q1 as ( select key from cbo_t1 where key = '1')
+select count(*) from q1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+12
+PREHOOK: query: with q1 as ( select value from v1 where c_boolean = false)
+select count(value) from q1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+PREHOOK: Input: default@v1
+#### A masked pattern was here ####
+POSTHOOK: query: with q1 as ( select value from v1 where c_boolean = false)
+select count(value) from q1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+POSTHOOK: Input: default@v1
+#### A masked pattern was here ####
+2
+PREHOOK: query: create view v4 as
+with q1 as ( select key,c_int from cbo_t1 where key = '1')
+select * from q1
+PREHOOK: type: CREATEVIEW
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Output: database:default
+PREHOOK: Output: default@v4
+POSTHOOK: query: create view v4 as
+with q1 as ( select key,c_int from cbo_t1 where key = '1')
+select * from q1
+POSTHOOK: type: CREATEVIEW
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@v4
+PREHOOK: query: with q1 as ( select c_int from q2 where c_boolean = false),
+q2 as ( select c_int,c_boolean from v1 where value = '1')
+select sum(c_int) from (select c_int from q1) a
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+PREHOOK: Input: default@v1
+#### A masked pattern was here ####
+POSTHOOK: query: with q1 as ( select c_int from q2 where c_boolean = false),
+q2 as ( select c_int,c_boolean from v1 where value = '1')
+select sum(c_int) from (select c_int from q1) a
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+POSTHOOK: Input: default@v1
+#### A masked pattern was here ####
+2
+PREHOOK: query: with q1 as ( select cbo_t1.c_int c_int from q2 join cbo_t1 where q2.c_int = cbo_t1.c_int and cbo_t1.dt='2014'),
+q2 as ( select c_int,c_boolean from v1 where value = '1' or dt = '14')
+select count(*) from q1 join q2 join v4 on q1.c_int = q2.c_int and v4.c_int = q2.c_int
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+PREHOOK: Input: default@v1
+PREHOOK: Input: default@v4
+#### A masked pattern was here ####
+POSTHOOK: query: with q1 as ( select cbo_t1.c_int c_int from q2 join cbo_t1 where q2.c_int = cbo_t1.c_int and cbo_t1.dt='2014'),
+q2 as ( select c_int,c_boolean from v1 where value = '1' or dt = '14')
+select count(*) from q1 join q2 join v4 on q1.c_int = q2.c_int and v4.c_int = q2.c_int
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+POSTHOOK: Input: default@v1
+POSTHOOK: Input: default@v4
+#### A masked pattern was here ####
+31104
+PREHOOK: query: drop view v1
+PREHOOK: type: DROPVIEW
+PREHOOK: Input: default@v1
+PREHOOK: Output: default@v1
+POSTHOOK: query: drop view v1
+POSTHOOK: type: DROPVIEW
+POSTHOOK: Input: default@v1
+POSTHOOK: Output: default@v1
+PREHOOK: query: drop view v2
+PREHOOK: type: DROPVIEW
+PREHOOK: Input: default@v2
+PREHOOK: Output: default@v2
+POSTHOOK: query: drop view v2
+POSTHOOK: type: DROPVIEW
+POSTHOOK: Input: default@v2
+POSTHOOK: Output: default@v2
+PREHOOK: query: drop view v3
+PREHOOK: type: DROPVIEW
+PREHOOK: Input: default@v3
+PREHOOK: Output: default@v3
+POSTHOOK: query: drop view v3
+POSTHOOK: type: DROPVIEW
+POSTHOOK: Input: default@v3
+POSTHOOK: Output: default@v3
+PREHOOK: query: drop view v4
+PREHOOK: type: DROPVIEW
+PREHOOK: Input: default@v4
+PREHOOK: Output: default@v4
+POSTHOOK: query: drop view v4
+POSTHOOK: type: DROPVIEW
+POSTHOOK: Input: default@v4
+POSTHOOK: Output: default@v4
Index: ql/src/test/results/clientpositive/tez/cbo_windowing.q.out
===================================================================
--- ql/src/test/results/clientpositive/tez/cbo_windowing.q.out (revision 0)
+++ ql/src/test/results/clientpositive/tez/cbo_windowing.q.out (working copy)
@@ -0,0 +1,289 @@
+PREHOOK: query: -- 9. Test Windowing Functions
+select count(c_int) over() from cbo_t1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: -- 9. Test Windowing Functions
+select count(c_int) over() from cbo_t1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+18
+18
+18
+18
+18
+18
+18
+18
+18
+18
+18
+18
+18
+18
+18
+18
+18
+18
+18
+18
+PREHOOK: query: select count(c_int) over(), sum(c_float) over(), max(c_int) over(), min(c_int) over(), row_number() over(), rank() over(), dense_rank() over(), percent_rank() over(), lead(c_int, 2, c_int) over(), lag(c_float, 2, c_float) over() from cbo_t1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: select count(c_int) over(), sum(c_float) over(), max(c_int) over(), min(c_int) over(), row_number() over(), rank() over(), dense_rank() over(), percent_rank() over(), lead(c_int, 2, c_int) over(), lag(c_float, 2, c_float) over() from cbo_t1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+18 18.0 1 1 1 1 1 0.0 1 NULL
+18 18.0 1 1 2 1 1 0.0 1 NULL
+18 18.0 1 1 3 1 1 0.0 1 NULL
+18 18.0 1 1 4 1 1 0.0 1 NULL
+18 18.0 1 1 5 1 1 0.0 1 1.0
+18 18.0 1 1 6 1 1 0.0 1 1.0
+18 18.0 1 1 7 1 1 0.0 1 1.0
+18 18.0 1 1 8 1 1 0.0 1 1.0
+18 18.0 1 1 9 1 1 0.0 1 1.0
+18 18.0 1 1 10 1 1 0.0 1 1.0
+18 18.0 1 1 11 1 1 0.0 1 1.0
+18 18.0 1 1 12 1 1 0.0 1 1.0
+18 18.0 1 1 13 1 1 0.0 1 1.0
+18 18.0 1 1 14 1 1 0.0 1 1.0
+18 18.0 1 1 15 1 1 0.0 1 1.0
+18 18.0 1 1 16 1 1 0.0 1 1.0
+18 18.0 1 1 17 1 1 0.0 1 1.0
+18 18.0 1 1 18 1 1 0.0 1 1.0
+18 18.0 1 1 19 1 1 0.0 1 1.0
+18 18.0 1 1 20 1 1 0.0 1 1.0
+PREHOOK: query: select * from (select count(c_int) over(), sum(c_float) over(), max(c_int) over(), min(c_int) over(), row_number() over(), rank() over(), dense_rank() over(), percent_rank() over(), lead(c_int, 2, c_int) over(), lag(c_float, 2, c_float) over() from cbo_t1) cbo_t1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: select * from (select count(c_int) over(), sum(c_float) over(), max(c_int) over(), min(c_int) over(), row_number() over(), rank() over(), dense_rank() over(), percent_rank() over(), lead(c_int, 2, c_int) over(), lag(c_float, 2, c_float) over() from cbo_t1) cbo_t1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+18 18.0 1 1 1 1 1 0.0 1 NULL
+18 18.0 1 1 2 1 1 0.0 1 NULL
+18 18.0 1 1 3 1 1 0.0 1 NULL
+18 18.0 1 1 4 1 1 0.0 1 NULL
+18 18.0 1 1 5 1 1 0.0 1 1.0
+18 18.0 1 1 6 1 1 0.0 1 1.0
+18 18.0 1 1 7 1 1 0.0 1 1.0
+18 18.0 1 1 8 1 1 0.0 1 1.0
+18 18.0 1 1 9 1 1 0.0 1 1.0
+18 18.0 1 1 10 1 1 0.0 1 1.0
+18 18.0 1 1 11 1 1 0.0 1 1.0
+18 18.0 1 1 12 1 1 0.0 1 1.0
+18 18.0 1 1 13 1 1 0.0 1 1.0
+18 18.0 1 1 14 1 1 0.0 1 1.0
+18 18.0 1 1 15 1 1 0.0 1 1.0
+18 18.0 1 1 16 1 1 0.0 1 1.0
+18 18.0 1 1 17 1 1 0.0 1 1.0
+18 18.0 1 1 18 1 1 0.0 1 1.0
+18 18.0 1 1 19 1 1 0.0 1 1.0
+18 18.0 1 1 20 1 1 0.0 1 1.0
+PREHOOK: query: select x from (select count(c_int) over() as x, sum(c_float) over() from cbo_t1) cbo_t1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: select x from (select count(c_int) over() as x, sum(c_float) over() from cbo_t1) cbo_t1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+18
+18
+18
+18
+18
+18
+18
+18
+18
+18
+18
+18
+18
+18
+18
+18
+18
+18
+18
+18
+PREHOOK: query: select 1+sum(c_int) over() from cbo_t1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: select 1+sum(c_int) over() from cbo_t1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+19
+19
+19
+19
+19
+19
+19
+19
+19
+19
+19
+19
+19
+19
+19
+19
+19
+19
+19
+19
+PREHOOK: query: select sum(c_int)+sum(sum(c_int)) over() from cbo_t1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: select sum(c_int)+sum(sum(c_int)) over() from cbo_t1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+36
+PREHOOK: query: select * from (select max(c_int) over (partition by key order by value Rows UNBOUNDED PRECEDING), min(c_int) over (partition by key order by value rows current row), count(c_int) over(partition by key order by value ROWS 1 PRECEDING), avg(value) over (partition by key order by value Rows between unbounded preceding and unbounded following), sum(value) over (partition by key order by value rows between unbounded preceding and current row), avg(c_float) over (partition by key order by value Rows between 1 preceding and unbounded following), sum(c_float) over (partition by key order by value rows between 1 preceding and current row), max(c_float) over (partition by key order by value rows between 1 preceding and unbounded following), min(c_float) over (partition by key order by value rows between 1 preceding and 1 following) from cbo_t1) cbo_t1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: select * from (select max(c_int) over (partition by key order by value Rows UNBOUNDED PRECEDING), min(c_int) over (partition by key order by value rows current row), count(c_int) over(partition by key order by value ROWS 1 PRECEDING), avg(value) over (partition by key order by value Rows between unbounded preceding and unbounded following), sum(value) over (partition by key order by value rows between unbounded preceding and current row), avg(c_float) over (partition by key order by value Rows between 1 preceding and unbounded following), sum(c_float) over (partition by key order by value rows between 1 preceding and current row), max(c_float) over (partition by key order by value rows between 1 preceding and unbounded following), min(c_float) over (partition by key order by value rows between 1 preceding and 1 following) from cbo_t1) cbo_t1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+1 1 1 1.0 1.0 1.0 1.0 1.0 1.0
+1 1 2 1.0 2.0 1.0 2.0 1.0 1.0
+1 1 1 1.0 1.0 1.0 1.0 1.0 1.0
+1 1 2 1.0 2.0 1.0 2.0 1.0 1.0
+1 1 1 1.0 1.0 1.0 1.0 1.0 1.0
+1 1 2 1.0 2.0 1.0 2.0 1.0 1.0
+1 1 2 1.0 3.0 1.0 2.0 1.0 1.0
+1 1 2 1.0 4.0 1.0 2.0 1.0 1.0
+1 1 2 1.0 5.0 1.0 2.0 1.0 1.0
+1 1 2 1.0 6.0 1.0 2.0 1.0 1.0
+1 1 2 1.0 7.0 1.0 2.0 1.0 1.0
+1 1 2 1.0 8.0 1.0 2.0 1.0 1.0
+1 1 2 1.0 9.0 1.0 2.0 1.0 1.0
+1 1 2 1.0 10.0 1.0 2.0 1.0 1.0
+1 1 2 1.0 11.0 1.0 2.0 1.0 1.0
+1 1 2 1.0 12.0 1.0 2.0 1.0 1.0
+1 1 1 1.0 1.0 1.0 1.0 1.0 1.0
+1 1 2 1.0 2.0 1.0 2.0 1.0 1.0
+NULL NULL 0 NULL 0.0 NULL NULL NULL
NULL +NULL NULL 0 NULL 0.0 NULL NULL NULL NULL +PREHOOK: query: select i, a, h, b, c, d, e, f, g, a as x, a +1 as y from (select max(c_int) over (partition by key order by value range UNBOUNDED PRECEDING) a, min(c_int) over (partition by key order by value range current row) b, count(c_int) over(partition by key order by value range 1 PRECEDING) c, avg(value) over (partition by key order by value range between unbounded preceding and unbounded following) d, sum(value) over (partition by key order by value range between unbounded preceding and current row) e, avg(c_float) over (partition by key order by value range between 1 preceding and unbounded following) f, sum(c_float) over (partition by key order by value range between 1 preceding and current row) g, max(c_float) over (partition by key order by value range between 1 preceding and unbounded following) h, min(c_float) over (partition by key order by value range between 1 preceding and 1 following) i from cbo_t1) cbo_t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@cbo_t1 +PREHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select i, a, h, b, c, d, e, f, g, a as x, a +1 as y from (select max(c_int) over (partition by key order by value range UNBOUNDED PRECEDING) a, min(c_int) over (partition by key order by value range current row) b, count(c_int) over(partition by key order by value range 1 PRECEDING) c, avg(value) over (partition by key order by value range between unbounded preceding and unbounded following) d, sum(value) over (partition by key order by value range between unbounded preceding and current row) e, avg(c_float) over (partition by key order by value range between 1 preceding and unbounded following) f, sum(c_float) over (partition by key order by value range between 1 preceding and current row) g, max(c_float) over (partition by key order by value range between 1 preceding and unbounded following) h, min(c_float) over (partition by key order by value range between 1 preceding and 1 following) i from cbo_t1) cbo_t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cbo_t1 +POSTHOOK: Input: default@cbo_t1@dt=2014 +#### A masked pattern was here #### +1.0 1 1.0 1 2 1.0 2.0 1.0 2.0 1 2 +1.0 1 1.0 1 2 1.0 2.0 1.0 2.0 1 2 +1.0 1 1.0 1 2 1.0 2.0 1.0 2.0 1 2 +1.0 1 1.0 1 2 1.0 2.0 1.0 2.0 1 2 +1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2 +1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2 +1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2 +1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2 +1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2 +1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2 +1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2 +1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2 +1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2 +1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2 +1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2 +1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2 +1.0 1 1.0 1 2 1.0 2.0 1.0 2.0 1 2 +1.0 1 1.0 1 2 1.0 2.0 1.0 2.0 1 2 +NULL NULL NULL NULL 0 NULL 0.0 NULL NULL NULL NULL +NULL NULL NULL NULL 0 NULL 0.0 NULL NULL NULL NULL +PREHOOK: query: select *, rank() over(partition by key order by value) as rr from src1 +PREHOOK: type: QUERY +PREHOOK: Input: default@src1 +#### A masked pattern was here #### +POSTHOOK: query: select *, rank() over(partition by key order by value) as rr from src1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src1 +#### A masked pattern was here #### + 1 + 1 + 1 + 1 + val_165 5 + val_193 6 + val_265 7 + val_27 8 + val_409 9 + val_484 10 +128 1 +146 val_146 1 +150 val_150 1 +213 val_213 1 +224 1 +238 val_238 1 +255 val_255 1 +273 val_273 1 +278 val_278 1 +311 val_311 1 +369 1 
+401 val_401 1 +406 val_406 1 +66 val_66 1 +98 val_98 1 +PREHOOK: query: select *, rank() over(partition by key order by value) from src1 +PREHOOK: type: QUERY +PREHOOK: Input: default@src1 +#### A masked pattern was here #### +POSTHOOK: query: select *, rank() over(partition by key order by value) from src1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src1 +#### A masked pattern was here #### + 1 + 1 + 1 + 1 + val_165 5 + val_193 6 + val_265 7 + val_27 8 + val_409 9 + val_484 10 +128 1 +146 val_146 1 +150 val_150 1 +213 val_213 1 +224 1 +238 val_238 1 +255 val_255 1 +273 val_273 1 +278 val_278 1 +311 val_311 1 +369 1 +401 val_401 1 +406 val_406 1 +66 val_66 1 +98 val_98 1 Index: ql/src/test/results/clientpositive/tez/orc_merge1.q.out =================================================================== --- ql/src/test/results/clientpositive/tez/orc_merge1.q.out (revision 1637277) +++ ql/src/test/results/clientpositive/tez/orc_merge1.q.out (working copy) @@ -117,48 +117,8 @@ POSTHOOK: Lineage: orcfile_merge1 PARTITION(ds=1,part=0).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] POSTHOOK: Lineage: orcfile_merge1 PARTITION(ds=1,part=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: orcfile_merge1 PARTITION(ds=1,part=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: DESC FORMATTED orcfile_merge1 partition (ds='1', part='0') -PREHOOK: type: DESCTABLE -PREHOOK: Input: default@orcfile_merge1 -POSTHOOK: query: DESC FORMATTED orcfile_merge1 partition (ds='1', part='0') -POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@orcfile_merge1 -# col_name data_type comment - -key int -value string - -# Partition Information -# col_name data_type comment - -ds string -part string - -# Detailed Partition Information -Partition Value: [1, 0] -Database: default -Table: orcfile_merge1 +Found 6 items #### A masked pattern was here #### -Protect Mode: None -#### A masked pattern was here #### -Partition Parameters: - COLUMN_STATS_ACCURATE true - numFiles 6 - numRows 242 - rawDataSize 22748 - totalSize 3037 -#### A masked pattern was here #### - -# Storage Information -SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde -InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat -OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat -Compressed: No -Num Buckets: -1 -Bucket Columns: [] -Sort Columns: [] -Storage Desc Params: - serialization.format 1 PREHOOK: query: -- auto-merge slow way EXPLAIN INSERT OVERWRITE TABLE orcfile_merge1b PARTITION (ds='1', part) @@ -286,48 +246,8 @@ POSTHOOK: Lineage: orcfile_merge1b PARTITION(ds=1,part=0).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] POSTHOOK: Lineage: orcfile_merge1b PARTITION(ds=1,part=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: orcfile_merge1b PARTITION(ds=1,part=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: DESC FORMATTED orcfile_merge1b partition (ds='1', part='0') -PREHOOK: type: DESCTABLE -PREHOOK: Input: default@orcfile_merge1b -POSTHOOK: query: DESC FORMATTED orcfile_merge1b partition (ds='1', part='0') -POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@orcfile_merge1b -# col_name data_type comment - -key int -value string - -# Partition Information -# col_name data_type comment - -ds string -part string - -# Detailed Partition Information -Partition Value: [1, 0] 
-Database: default -Table: orcfile_merge1b +Found 1 items #### A masked pattern was here #### -Protect Mode: None -#### A masked pattern was here #### -Partition Parameters: - COLUMN_STATS_ACCURATE true - numFiles 1 - numRows 242 - rawDataSize 22748 - totalSize 1325 -#### A masked pattern was here #### - -# Storage Information -SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde -InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat -OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat -Compressed: No -Num Buckets: -1 -Bucket Columns: [] -Sort Columns: [] -Storage Desc Params: - serialization.format 1 PREHOOK: query: -- auto-merge fast way EXPLAIN INSERT OVERWRITE TABLE orcfile_merge1c PARTITION (ds='1', part) @@ -447,48 +367,8 @@ POSTHOOK: Lineage: orcfile_merge1c PARTITION(ds=1,part=0).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] POSTHOOK: Lineage: orcfile_merge1c PARTITION(ds=1,part=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: orcfile_merge1c PARTITION(ds=1,part=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: DESC FORMATTED orcfile_merge1c partition (ds='1', part='0') -PREHOOK: type: DESCTABLE -PREHOOK: Input: default@orcfile_merge1c -POSTHOOK: query: DESC FORMATTED orcfile_merge1c partition (ds='1', part='0') -POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@orcfile_merge1c -# col_name data_type comment - -key int -value string - -# Partition Information -# col_name data_type comment - -ds string -part string - -# Detailed Partition Information -Partition Value: [1, 0] -Database: default -Table: orcfile_merge1c +Found 1 items #### A masked pattern was here #### -Protect Mode: None -#### A masked pattern was here #### -Partition Parameters: - COLUMN_STATS_ACCURATE true - numFiles 1 - numRows 242 - rawDataSize 22748 - totalSize 2392 -#### A masked pattern was here #### - -# Storage Information -SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde -InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat -OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat -Compressed: No -Num Buckets: -1 -Bucket Columns: [] -Sort Columns: [] -Storage Desc Params: - serialization.format 1 PREHOOK: query: -- Verify SELECT SUM(HASH(c)) FROM ( SELECT TRANSFORM(*) USING 'tr \t _' AS (c) Index: ql/src/test/results/clientpositive/tez/orc_merge2.q.out =================================================================== --- ql/src/test/results/clientpositive/tez/orc_merge2.q.out (revision 1637277) +++ ql/src/test/results/clientpositive/tez/orc_merge2.q.out (working copy) @@ -146,49 +146,8 @@ POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=9,three=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=9,three=7).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=9,three=7).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: DESC FORMATTED orcfile_merge2a partition (one='1', two='0', three='2') -PREHOOK: type: DESCTABLE -PREHOOK: Input: default@orcfile_merge2a -POSTHOOK: query: DESC FORMATTED orcfile_merge2a partition (one='1', two='0', three='2') -POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@orcfile_merge2a -# col_name data_type comment - -key int -value string - -# Partition Information -# col_name data_type 
comment - -one string -two string -three string - -# Detailed Partition Information -Partition Value: [1, 0, 2] -Database: default -Table: orcfile_merge2a +Found 1 items #### A masked pattern was here #### -Protect Mode: None -#### A masked pattern was here #### -Partition Parameters: - COLUMN_STATS_ACCURATE true - numFiles 1 - numRows 4 - rawDataSize 376 - totalSize 320 -#### A masked pattern was here #### - -# Storage Information -SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde -InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat -OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat -Compressed: No -Num Buckets: -1 -Bucket Columns: [] -Sort Columns: [] -Storage Desc Params: - serialization.format 1 PREHOOK: query: SELECT SUM(HASH(c)) FROM ( SELECT TRANSFORM(*) USING 'tr \t _' AS (c) FROM orcfile_merge2a Index: ql/src/test/results/clientpositive/tez/orc_merge3.q.out =================================================================== --- ql/src/test/results/clientpositive/tez/orc_merge3.q.out (revision 1637277) +++ ql/src/test/results/clientpositive/tez/orc_merge3.q.out (working copy) @@ -115,42 +115,8 @@ POSTHOOK: Output: default@orcfile_merge3b POSTHOOK: Lineage: orcfile_merge3b.key SIMPLE [(orcfile_merge3a)orcfile_merge3a.FieldSchema(name:key, type:int, comment:null), ] POSTHOOK: Lineage: orcfile_merge3b.value SIMPLE [(orcfile_merge3a)orcfile_merge3a.FieldSchema(name:value, type:string, comment:null), ] -PREHOOK: query: DESC FORMATTED orcfile_merge3b -PREHOOK: type: DESCTABLE -PREHOOK: Input: default@orcfile_merge3b -POSTHOOK: query: DESC FORMATTED orcfile_merge3b -POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@orcfile_merge3b -# col_name data_type comment - -key int -value string - -# Detailed Table Information -Database: default +Found 1 items #### A masked pattern was here #### -Protect Mode: None -Retention: 0 -#### A masked pattern was here #### -Table Type: MANAGED_TABLE -Table Parameters: - COLUMN_STATS_ACCURATE true - numFiles 1 - numRows 1000 - rawDataSize 94000 - totalSize 2538 -#### A masked pattern was here #### - -# Storage Information -SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde -InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat -OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat -Compressed: No -Num Buckets: -1 -Bucket Columns: [] -Sort Columns: [] -Storage Desc Params: - serialization.format 1 PREHOOK: query: SELECT SUM(HASH(c)) FROM ( SELECT TRANSFORM(key, value) USING 'tr \t _' AS (c) FROM orcfile_merge3a Index: ql/src/test/results/clientpositive/tez/orc_merge4.q.out =================================================================== --- ql/src/test/results/clientpositive/tez/orc_merge4.q.out (revision 1637277) +++ ql/src/test/results/clientpositive/tez/orc_merge4.q.out (working copy) @@ -36,47 +36,8 @@ POSTHOOK: Output: default@orcfile_merge3a@ds=1 POSTHOOK: Lineage: orcfile_merge3a PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: orcfile_merge3a PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: DESC FORMATTED orcfile_merge3a PARTITION (ds='1') -PREHOOK: type: DESCTABLE -PREHOOK: Input: default@orcfile_merge3a -POSTHOOK: query: DESC FORMATTED orcfile_merge3a PARTITION (ds='1') -POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@orcfile_merge3a -# col_name data_type comment - -key int -value string - -# Partition Information -# col_name data_type comment - -ds string - -# 
Detailed Partition Information -Partition Value: [1] -Database: default -Table: orcfile_merge3a +Found 1 items #### A masked pattern was here #### -Protect Mode: None -#### A masked pattern was here #### -Partition Parameters: - COLUMN_STATS_ACCURATE true - numFiles 1 - numRows 500 - rawDataSize 47000 - totalSize 2496 -#### A masked pattern was here #### - -# Storage Information -SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde -InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat -OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat -Compressed: No -Num Buckets: -1 -Bucket Columns: [] -Sort Columns: [] -Storage Desc Params: - serialization.format 1 PREHOOK: query: INSERT OVERWRITE TABLE orcfile_merge3a PARTITION (ds='1') SELECT * FROM src PREHOOK: type: QUERY @@ -101,88 +62,10 @@ POSTHOOK: Output: default@orcfile_merge3a@ds=2 POSTHOOK: Lineage: orcfile_merge3a PARTITION(ds=2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: orcfile_merge3a PARTITION(ds=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: DESC FORMATTED orcfile_merge3a PARTITION (ds='1') -PREHOOK: type: DESCTABLE -PREHOOK: Input: default@orcfile_merge3a -POSTHOOK: query: DESC FORMATTED orcfile_merge3a PARTITION (ds='1') -POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@orcfile_merge3a -# col_name data_type comment - -key int -value string - -# Partition Information -# col_name data_type comment - -ds string - -# Detailed Partition Information -Partition Value: [1] -Database: default -Table: orcfile_merge3a +Found 1 items #### A masked pattern was here #### -Protect Mode: None +Found 1 items #### A masked pattern was here #### -Partition Parameters: - COLUMN_STATS_ACCURATE true - numFiles 1 - numRows 500 - rawDataSize 47000 - totalSize 2496 -#### A masked pattern was here #### - -# Storage Information -SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde -InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat -OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat -Compressed: No -Num Buckets: -1 -Bucket Columns: [] -Sort Columns: [] -Storage Desc Params: - serialization.format 1 -PREHOOK: query: DESC FORMATTED orcfile_merge3a PARTITION (ds='2') -PREHOOK: type: DESCTABLE -PREHOOK: Input: default@orcfile_merge3a -POSTHOOK: query: DESC FORMATTED orcfile_merge3a PARTITION (ds='2') -POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@orcfile_merge3a -# col_name data_type comment - -key int -value string - -# Partition Information -# col_name data_type comment - -ds string - -# Detailed Partition Information -Partition Value: [2] -Database: default -Table: orcfile_merge3a -#### A masked pattern was here #### -Protect Mode: None -#### A masked pattern was here #### -Partition Parameters: - COLUMN_STATS_ACCURATE true - numFiles 1 - numRows 500 - rawDataSize 47000 - totalSize 2496 -#### A masked pattern was here #### - -# Storage Information -SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde -InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat -OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat -Compressed: No -Num Buckets: -1 -Bucket Columns: [] -Sort Columns: [] -Storage Desc Params: - serialization.format 1 PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE orcfile_merge3b SELECT key, value FROM orcfile_merge3a PREHOOK: type: QUERY Index: ql/src/test/results/clientpositive/tez/orc_merge5.q.out =================================================================== --- 
ql/src/test/results/clientpositive/tez/orc_merge5.q.out (revision 1637277) +++ ql/src/test/results/clientpositive/tez/orc_merge5.q.out (working copy) @@ -104,45 +104,8 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@orc_merge5b POSTHOOK: Output: default@orc_merge5b -PREHOOK: query: desc formatted orc_merge5b -PREHOOK: type: DESCTABLE -PREHOOK: Input: default@orc_merge5b -POSTHOOK: query: desc formatted orc_merge5b -POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@orc_merge5b -# col_name data_type comment - -userid bigint -string1 string -subtype double -decimal1 decimal(10,0) -ts timestamp - -# Detailed Table Information -Database: default +Found 3 items #### A masked pattern was here #### -Protect Mode: None -Retention: 0 -#### A masked pattern was here #### -Table Type: MANAGED_TABLE -Table Parameters: - COLUMN_STATS_ACCURATE true - numFiles 3 - numRows 3 - rawDataSize 765 - totalSize 1133 -#### A masked pattern was here #### - -# Storage Information -SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde -InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat -OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat -Compressed: No -Num Buckets: -1 -Bucket Columns: [] -Sort Columns: [] -Storage Desc Params: - serialization.format 1 PREHOOK: query: select * from orc_merge5b PREHOOK: type: QUERY PREHOOK: Input: default@orc_merge5b @@ -274,45 +237,8 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@orc_merge5b POSTHOOK: Output: default@orc_merge5b -PREHOOK: query: desc formatted orc_merge5b -PREHOOK: type: DESCTABLE -PREHOOK: Input: default@orc_merge5b -POSTHOOK: query: desc formatted orc_merge5b -POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@orc_merge5b -# col_name data_type comment - -userid bigint -string1 string -subtype double -decimal1 decimal(10,0) -ts timestamp - -# Detailed Table Information -Database: default +Found 1 items #### A masked pattern was here #### -Protect Mode: None -Retention: 0 -#### A masked pattern was here #### -Table Type: MANAGED_TABLE -Table Parameters: - COLUMN_STATS_ACCURATE true - numFiles 1 - numRows 3 - rawDataSize 765 - totalSize 899 -#### A masked pattern was here #### - -# Storage Information -SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde -InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat -OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat -Compressed: No -Num Buckets: -1 -Bucket Columns: [] -Sort Columns: [] -Storage Desc Params: - serialization.format 1 PREHOOK: query: select * from orc_merge5b PREHOOK: type: QUERY PREHOOK: Input: default@orc_merge5b @@ -345,45 +271,8 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@orc_merge5b POSTHOOK: Output: default@orc_merge5b -PREHOOK: query: desc formatted orc_merge5b -PREHOOK: type: DESCTABLE -PREHOOK: Input: default@orc_merge5b -POSTHOOK: query: desc formatted orc_merge5b -POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@orc_merge5b -# col_name data_type comment - -userid bigint -string1 string -subtype double -decimal1 decimal(10,0) -ts timestamp - -# Detailed Table Information -Database: default +Found 3 items #### A masked pattern was here #### -Protect Mode: None -Retention: 0 -#### A masked pattern was here #### -Table Type: MANAGED_TABLE -Table Parameters: - COLUMN_STATS_ACCURATE true - numFiles 3 - numRows 3 - rawDataSize 765 - totalSize 1133 -#### A masked pattern was here #### - -# Storage Information -SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde -InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat -OutputFormat: 
org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat -Compressed: No -Num Buckets: -1 -Bucket Columns: [] -Sort Columns: [] -Storage Desc Params: - serialization.format 1 PREHOOK: query: select * from orc_merge5b PREHOOK: type: QUERY PREHOOK: Input: default@orc_merge5b @@ -438,45 +327,8 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@orc_merge5b POSTHOOK: Output: default@orc_merge5b -PREHOOK: query: desc formatted orc_merge5b -PREHOOK: type: DESCTABLE -PREHOOK: Input: default@orc_merge5b -POSTHOOK: query: desc formatted orc_merge5b -POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@orc_merge5b -# col_name data_type comment - -userid bigint -string1 string -subtype double -decimal1 decimal(10,0) -ts timestamp - -# Detailed Table Information -Database: default +Found 1 items #### A masked pattern was here #### -Protect Mode: None -Retention: 0 -#### A masked pattern was here #### -Table Type: MANAGED_TABLE -Table Parameters: - COLUMN_STATS_ACCURATE true - numFiles 1 - numRows 3 - rawDataSize 765 - totalSize 899 -#### A masked pattern was here #### - -# Storage Information -SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde -InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat -OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat -Compressed: No -Num Buckets: -1 -Bucket Columns: [] -Sort Columns: [] -Storage Desc Params: - serialization.format 1 PREHOOK: query: select * from orc_merge5b PREHOOK: type: QUERY PREHOOK: Input: default@orc_merge5b Index: ql/src/test/results/clientpositive/tez/orc_merge6.q.out =================================================================== --- ql/src/test/results/clientpositive/tez/orc_merge6.q.out (revision 1637277) +++ ql/src/test/results/clientpositive/tez/orc_merge6.q.out (working copy) @@ -134,96 +134,10 @@ POSTHOOK: Input: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a@year=2001/hour=24 -PREHOOK: query: desc formatted orc_merge5a partition(year="2000",hour=24) -PREHOOK: type: DESCTABLE -PREHOOK: Input: default@orc_merge5a -POSTHOOK: query: desc formatted orc_merge5a partition(year="2000",hour=24) -POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@orc_merge5a -# col_name data_type comment - -userid bigint -string1 string -subtype double -decimal1 decimal(10,0) -ts timestamp - -# Partition Information -# col_name data_type comment - -year string -hour int - -# Detailed Partition Information -Partition Value: [2000, 24] -Database: default -Table: orc_merge5a +Found 3 items #### A masked pattern was here #### -Protect Mode: None +Found 3 items #### A masked pattern was here #### -Partition Parameters: - COLUMN_STATS_ACCURATE true - numFiles 3 - numRows 3 - rawDataSize 765 - totalSize 1133 -#### A masked pattern was here #### - -# Storage Information -SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde -InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat -OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat -Compressed: No -Num Buckets: -1 -Bucket Columns: [] -Sort Columns: [] -Storage Desc Params: - serialization.format 1 -PREHOOK: query: desc formatted orc_merge5a partition(year="2001",hour=24) -PREHOOK: type: DESCTABLE -PREHOOK: Input: default@orc_merge5a -POSTHOOK: query: desc formatted orc_merge5a partition(year="2001",hour=24) -POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@orc_merge5a -# col_name data_type comment - -userid bigint -string1 string -subtype double -decimal1 decimal(10,0) -ts timestamp - -# Partition Information -# col_name data_type 
comment - -year string -hour int - -# Detailed Partition Information -Partition Value: [2001, 24] -Database: default -Table: orc_merge5a -#### A masked pattern was here #### -Protect Mode: None -#### A masked pattern was here #### -Partition Parameters: - COLUMN_STATS_ACCURATE true - numFiles 3 - numRows 3 - rawDataSize 765 - totalSize 1133 -#### A masked pattern was here #### - -# Storage Information -SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde -InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat -OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat -Compressed: No -Num Buckets: -1 -Bucket Columns: [] -Sort Columns: [] -Storage Desc Params: - serialization.format 1 PREHOOK: query: show partitions orc_merge5a PREHOOK: type: SHOWPARTITIONS PREHOOK: Input: default@orc_merge5a @@ -398,96 +312,10 @@ POSTHOOK: Input: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a@year=2001/hour=24 -PREHOOK: query: desc formatted orc_merge5a partition(year="2000",hour=24) -PREHOOK: type: DESCTABLE -PREHOOK: Input: default@orc_merge5a -POSTHOOK: query: desc formatted orc_merge5a partition(year="2000",hour=24) -POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@orc_merge5a -# col_name data_type comment - -userid bigint -string1 string -subtype double -decimal1 decimal(10,0) -ts timestamp - -# Partition Information -# col_name data_type comment - -year string -hour int - -# Detailed Partition Information -Partition Value: [2000, 24] -Database: default -Table: orc_merge5a +Found 1 items #### A masked pattern was here #### -Protect Mode: None +Found 1 items #### A masked pattern was here #### -Partition Parameters: - COLUMN_STATS_ACCURATE true - numFiles 1 - numRows 3 - rawDataSize 765 - totalSize 899 -#### A masked pattern was here #### - -# Storage Information -SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde -InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat -OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat -Compressed: No -Num Buckets: -1 -Bucket Columns: [] -Sort Columns: [] -Storage Desc Params: - serialization.format 1 -PREHOOK: query: desc formatted orc_merge5a partition(year="2001",hour=24) -PREHOOK: type: DESCTABLE -PREHOOK: Input: default@orc_merge5a -POSTHOOK: query: desc formatted orc_merge5a partition(year="2001",hour=24) -POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@orc_merge5a -# col_name data_type comment - -userid bigint -string1 string -subtype double -decimal1 decimal(10,0) -ts timestamp - -# Partition Information -# col_name data_type comment - -year string -hour int - -# Detailed Partition Information -Partition Value: [2001, 24] -Database: default -Table: orc_merge5a -#### A masked pattern was here #### -Protect Mode: None -#### A masked pattern was here #### -Partition Parameters: - COLUMN_STATS_ACCURATE true - numFiles 1 - numRows 3 - rawDataSize 765 - totalSize 899 -#### A masked pattern was here #### - -# Storage Information -SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde -InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat -OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat -Compressed: No -Num Buckets: -1 -Bucket Columns: [] -Sort Columns: [] -Storage Desc Params: - serialization.format 1 PREHOOK: query: show partitions orc_merge5a PREHOOK: type: SHOWPARTITIONS PREHOOK: Input: default@orc_merge5a @@ -560,96 +388,10 @@ POSTHOOK: Input: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a POSTHOOK: Output: 
default@orc_merge5a@year=2001/hour=24 -PREHOOK: query: desc formatted orc_merge5a partition(year="2000",hour=24) -PREHOOK: type: DESCTABLE -PREHOOK: Input: default@orc_merge5a -POSTHOOK: query: desc formatted orc_merge5a partition(year="2000",hour=24) -POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@orc_merge5a -# col_name data_type comment - -userid bigint -string1 string -subtype double -decimal1 decimal(10,0) -ts timestamp - -# Partition Information -# col_name data_type comment - -year string -hour int - -# Detailed Partition Information -Partition Value: [2000, 24] -Database: default -Table: orc_merge5a +Found 3 items #### A masked pattern was here #### -Protect Mode: None +Found 3 items #### A masked pattern was here #### -Partition Parameters: - COLUMN_STATS_ACCURATE true - numFiles 3 - numRows 3 - rawDataSize 765 - totalSize 1133 -#### A masked pattern was here #### - -# Storage Information -SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde -InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat -OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat -Compressed: No -Num Buckets: -1 -Bucket Columns: [] -Sort Columns: [] -Storage Desc Params: - serialization.format 1 -PREHOOK: query: desc formatted orc_merge5a partition(year="2001",hour=24) -PREHOOK: type: DESCTABLE -PREHOOK: Input: default@orc_merge5a -POSTHOOK: query: desc formatted orc_merge5a partition(year="2001",hour=24) -POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@orc_merge5a -# col_name data_type comment - -userid bigint -string1 string -subtype double -decimal1 decimal(10,0) -ts timestamp - -# Partition Information -# col_name data_type comment - -year string -hour int - -# Detailed Partition Information -Partition Value: [2001, 24] -Database: default -Table: orc_merge5a -#### A masked pattern was here #### -Protect Mode: None -#### A masked pattern was here #### -Partition Parameters: - COLUMN_STATS_ACCURATE true - numFiles 3 - numRows 3 - rawDataSize 765 - totalSize 1133 -#### A masked pattern was here #### - -# Storage Information -SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde -InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat -OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat -Compressed: No -Num Buckets: -1 -Bucket Columns: [] -Sort Columns: [] -Storage Desc Params: - serialization.format 1 PREHOOK: query: show partitions orc_merge5a PREHOOK: type: SHOWPARTITIONS PREHOOK: Input: default@orc_merge5a @@ -742,96 +484,10 @@ POSTHOOK: Input: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a@year=2001/hour=24 -PREHOOK: query: desc formatted orc_merge5a partition(year="2000",hour=24) -PREHOOK: type: DESCTABLE -PREHOOK: Input: default@orc_merge5a -POSTHOOK: query: desc formatted orc_merge5a partition(year="2000",hour=24) -POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@orc_merge5a -# col_name data_type comment - -userid bigint -string1 string -subtype double -decimal1 decimal(10,0) -ts timestamp - -# Partition Information -# col_name data_type comment - -year string -hour int - -# Detailed Partition Information -Partition Value: [2000, 24] -Database: default -Table: orc_merge5a +Found 1 items #### A masked pattern was here #### -Protect Mode: None +Found 1 items #### A masked pattern was here #### -Partition Parameters: - COLUMN_STATS_ACCURATE true - numFiles 1 - numRows 3 - rawDataSize 765 - totalSize 899 -#### A masked pattern was here #### - -# Storage Information -SerDe Library: 
org.apache.hadoop.hive.ql.io.orc.OrcSerde -InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat -OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat -Compressed: No -Num Buckets: -1 -Bucket Columns: [] -Sort Columns: [] -Storage Desc Params: - serialization.format 1 -PREHOOK: query: desc formatted orc_merge5a partition(year="2001",hour=24) -PREHOOK: type: DESCTABLE -PREHOOK: Input: default@orc_merge5a -POSTHOOK: query: desc formatted orc_merge5a partition(year="2001",hour=24) -POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@orc_merge5a -# col_name data_type comment - -userid bigint -string1 string -subtype double -decimal1 decimal(10,0) -ts timestamp - -# Partition Information -# col_name data_type comment - -year string -hour int - -# Detailed Partition Information -Partition Value: [2001, 24] -Database: default -Table: orc_merge5a -#### A masked pattern was here #### -Protect Mode: None -#### A masked pattern was here #### -Partition Parameters: - COLUMN_STATS_ACCURATE true - numFiles 1 - numRows 3 - rawDataSize 765 - totalSize 899 -#### A masked pattern was here #### - -# Storage Information -SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde -InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat -OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat -Compressed: No -Num Buckets: -1 -Bucket Columns: [] -Sort Columns: [] -Storage Desc Params: - serialization.format 1 PREHOOK: query: show partitions orc_merge5a PREHOOK: type: SHOWPARTITIONS PREHOOK: Input: default@orc_merge5a Index: ql/src/test/results/clientpositive/tez/orc_merge7.q.out =================================================================== --- ql/src/test/results/clientpositive/tez/orc_merge7.q.out (revision 1637277) +++ ql/src/test/results/clientpositive/tez/orc_merge7.q.out (working copy) @@ -167,94 +167,10 @@ POSTHOOK: Input: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a@st=0.8 -PREHOOK: query: desc formatted orc_merge5a partition(st=80.0) -PREHOOK: type: DESCTABLE -PREHOOK: Input: default@orc_merge5a -POSTHOOK: query: desc formatted orc_merge5a partition(st=80.0) -POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@orc_merge5a -# col_name data_type comment - -userid bigint -string1 string -subtype double -decimal1 decimal(10,0) -ts timestamp - -# Partition Information -# col_name data_type comment - -st double - -# Detailed Partition Information -Partition Value: [80.0] -Database: default -Table: orc_merge5a +Found 1 items #### A masked pattern was here #### -Protect Mode: None +Found 2 items #### A masked pattern was here #### -Partition Parameters: - COLUMN_STATS_ACCURATE true - numFiles 1 - numRows 1 - rawDataSize 255 - totalSize 513 -#### A masked pattern was here #### - -# Storage Information -SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde -InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat -OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat -Compressed: No -Num Buckets: -1 -Bucket Columns: [] -Sort Columns: [] -Storage Desc Params: - serialization.format 1 -PREHOOK: query: desc formatted orc_merge5a partition(st=0.8) -PREHOOK: type: DESCTABLE -PREHOOK: Input: default@orc_merge5a -POSTHOOK: query: desc formatted orc_merge5a partition(st=0.8) -POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@orc_merge5a -# col_name data_type comment - -userid bigint -string1 string -subtype double -decimal1 decimal(10,0) -ts timestamp - -# Partition Information -# col_name data_type comment - -st double - 
-# Detailed Partition Information -Partition Value: [0.8] -Database: default -Table: orc_merge5a -#### A masked pattern was here #### -Protect Mode: None -#### A masked pattern was here #### -Partition Parameters: - COLUMN_STATS_ACCURATE true - numFiles 2 - numRows 2 - rawDataSize 510 - totalSize 1044 -#### A masked pattern was here #### - -# Storage Information -SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde -InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat -OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat -Compressed: No -Num Buckets: -1 -Bucket Columns: [] -Sort Columns: [] -Storage Desc Params: - serialization.format 1 PREHOOK: query: show partitions orc_merge5a PREHOOK: type: SHOWPARTITIONS PREHOOK: Input: default@orc_merge5a @@ -463,94 +379,10 @@ POSTHOOK: Input: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a@st=0.8 -PREHOOK: query: desc formatted orc_merge5a partition(st=80.0) -PREHOOK: type: DESCTABLE -PREHOOK: Input: default@orc_merge5a -POSTHOOK: query: desc formatted orc_merge5a partition(st=80.0) -POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@orc_merge5a -# col_name data_type comment - -userid bigint -string1 string -subtype double -decimal1 decimal(10,0) -ts timestamp - -# Partition Information -# col_name data_type comment - -st double - -# Detailed Partition Information -Partition Value: [80.0] -Database: default -Table: orc_merge5a +Found 1 items #### A masked pattern was here #### -Protect Mode: None +Found 1 items #### A masked pattern was here #### -Partition Parameters: - COLUMN_STATS_ACCURATE true - numFiles 1 - numRows 1 - rawDataSize 255 - totalSize 513 -#### A masked pattern was here #### - -# Storage Information -SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde -InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat -OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat -Compressed: No -Num Buckets: -1 -Bucket Columns: [] -Sort Columns: [] -Storage Desc Params: - serialization.format 1 -PREHOOK: query: desc formatted orc_merge5a partition(st=0.8) -PREHOOK: type: DESCTABLE -PREHOOK: Input: default@orc_merge5a -POSTHOOK: query: desc formatted orc_merge5a partition(st=0.8) -POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@orc_merge5a -# col_name data_type comment - -userid bigint -string1 string -subtype double -decimal1 decimal(10,0) -ts timestamp - -# Partition Information -# col_name data_type comment - -st double - -# Detailed Partition Information -Partition Value: [0.8] -Database: default -Table: orc_merge5a -#### A masked pattern was here #### -Protect Mode: None -#### A masked pattern was here #### -Partition Parameters: - COLUMN_STATS_ACCURATE true - numFiles 1 - numRows 2 - rawDataSize 510 - totalSize 838 -#### A masked pattern was here #### - -# Storage Information -SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde -InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat -OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat -Compressed: No -Num Buckets: -1 -Bucket Columns: [] -Sort Columns: [] -Storage Desc Params: - serialization.format 1 PREHOOK: query: show partitions orc_merge5a PREHOOK: type: SHOWPARTITIONS PREHOOK: Input: default@orc_merge5a @@ -662,94 +494,10 @@ POSTHOOK: Input: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a@st=0.8 -PREHOOK: query: desc formatted orc_merge5a partition(st=80.0) -PREHOOK: type: DESCTABLE -PREHOOK: Input: default@orc_merge5a -POSTHOOK: 
query: desc formatted orc_merge5a partition(st=80.0) -POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@orc_merge5a -# col_name data_type comment - -userid bigint -string1 string -subtype double -decimal1 decimal(10,0) -ts timestamp - -# Partition Information -# col_name data_type comment - -st double - -# Detailed Partition Information -Partition Value: [80.0] -Database: default -Table: orc_merge5a +Found 1 items #### A masked pattern was here #### -Protect Mode: None +Found 2 items #### A masked pattern was here #### -Partition Parameters: - COLUMN_STATS_ACCURATE true - numFiles 1 - numRows 1 - rawDataSize 255 - totalSize 513 -#### A masked pattern was here #### - -# Storage Information -SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde -InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat -OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat -Compressed: No -Num Buckets: -1 -Bucket Columns: [] -Sort Columns: [] -Storage Desc Params: - serialization.format 1 -PREHOOK: query: desc formatted orc_merge5a partition(st=0.8) -PREHOOK: type: DESCTABLE -PREHOOK: Input: default@orc_merge5a -POSTHOOK: query: desc formatted orc_merge5a partition(st=0.8) -POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@orc_merge5a -# col_name data_type comment - -userid bigint -string1 string -subtype double -decimal1 decimal(10,0) -ts timestamp - -# Partition Information -# col_name data_type comment - -st double - -# Detailed Partition Information -Partition Value: [0.8] -Database: default -Table: orc_merge5a -#### A masked pattern was here #### -Protect Mode: None -#### A masked pattern was here #### -Partition Parameters: - COLUMN_STATS_ACCURATE true - numFiles 2 - numRows 2 - rawDataSize 510 - totalSize 1044 -#### A masked pattern was here #### - -# Storage Information -SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde -InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat -OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat -Compressed: No -Num Buckets: -1 -Bucket Columns: [] -Sort Columns: [] -Storage Desc Params: - serialization.format 1 PREHOOK: query: show partitions orc_merge5a PREHOOK: type: SHOWPARTITIONS PREHOOK: Input: default@orc_merge5a @@ -844,94 +592,10 @@ POSTHOOK: Input: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a@st=0.8 -PREHOOK: query: desc formatted orc_merge5a partition(st=80.0) -PREHOOK: type: DESCTABLE -PREHOOK: Input: default@orc_merge5a -POSTHOOK: query: desc formatted orc_merge5a partition(st=80.0) -POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@orc_merge5a -# col_name data_type comment - -userid bigint -string1 string -subtype double -decimal1 decimal(10,0) -ts timestamp - -# Partition Information -# col_name data_type comment - -st double - -# Detailed Partition Information -Partition Value: [80.0] -Database: default -Table: orc_merge5a +Found 1 items #### A masked pattern was here #### -Protect Mode: None +Found 1 items #### A masked pattern was here #### -Partition Parameters: - COLUMN_STATS_ACCURATE true - numFiles 1 - numRows 1 - rawDataSize 255 - totalSize 513 -#### A masked pattern was here #### - -# Storage Information -SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde -InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat -OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat -Compressed: No -Num Buckets: -1 -Bucket Columns: [] -Sort Columns: [] -Storage Desc Params: - serialization.format 1 -PREHOOK: query: desc formatted orc_merge5a 
partition(st=0.8) -PREHOOK: type: DESCTABLE -PREHOOK: Input: default@orc_merge5a -POSTHOOK: query: desc formatted orc_merge5a partition(st=0.8) -POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@orc_merge5a -# col_name data_type comment - -userid bigint -string1 string -subtype double -decimal1 decimal(10,0) -ts timestamp - -# Partition Information -# col_name data_type comment - -st double - -# Detailed Partition Information -Partition Value: [0.8] -Database: default -Table: orc_merge5a -#### A masked pattern was here #### -Protect Mode: None -#### A masked pattern was here #### -Partition Parameters: - COLUMN_STATS_ACCURATE true - numFiles 1 - numRows 2 - rawDataSize 510 - totalSize 838 -#### A masked pattern was here #### - -# Storage Information -SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde -InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat -OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat -Compressed: No -Num Buckets: -1 -Bucket Columns: [] -Sort Columns: [] -Storage Desc Params: - serialization.format 1 PREHOOK: query: show partitions orc_merge5a PREHOOK: type: SHOWPARTITIONS PREHOOK: Input: default@orc_merge5a Index: ql/src/test/results/clientpositive/tez/orc_merge_incompat1.q.out =================================================================== --- ql/src/test/results/clientpositive/tez/orc_merge_incompat1.q.out (revision 1637277) +++ ql/src/test/results/clientpositive/tez/orc_merge_incompat1.q.out (working copy) @@ -155,45 +155,8 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@orc_merge5b POSTHOOK: Output: default@orc_merge5b -PREHOOK: query: desc formatted orc_merge5b -PREHOOK: type: DESCTABLE -PREHOOK: Input: default@orc_merge5b -POSTHOOK: query: desc formatted orc_merge5b -POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@orc_merge5b -# col_name data_type comment - -userid bigint -string1 string -subtype double -decimal1 decimal(10,0) -ts timestamp - -# Detailed Table Information -Database: default +Found 5 items #### A masked pattern was here #### -Protect Mode: None -Retention: 0 -#### A masked pattern was here #### -Table Type: MANAGED_TABLE -Table Parameters: - COLUMN_STATS_ACCURATE true - numFiles 5 - numRows 15 - rawDataSize 3825 - totalSize 2877 -#### A masked pattern was here #### - -# Storage Information -SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde -InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat -OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat -Compressed: No -Num Buckets: -1 -Bucket Columns: [] -Sort Columns: [] -Storage Desc Params: - serialization.format 1 PREHOOK: query: select * from orc_merge5b PREHOOK: type: QUERY PREHOOK: Input: default@orc_merge5b @@ -235,45 +198,8 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@orc_merge5b POSTHOOK: Output: default@orc_merge5b -PREHOOK: query: desc formatted orc_merge5b -PREHOOK: type: DESCTABLE -PREHOOK: Input: default@orc_merge5b -POSTHOOK: query: desc formatted orc_merge5b -POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@orc_merge5b -# col_name data_type comment - -userid bigint -string1 string -subtype double -decimal1 decimal(10,0) -ts timestamp - -# Detailed Table Information -Database: default +Found 3 items #### A masked pattern was here #### -Protect Mode: None -Retention: 0 -#### A masked pattern was here #### -Table Type: MANAGED_TABLE -Table Parameters: - COLUMN_STATS_ACCURATE true - numFiles 3 - numRows 15 - rawDataSize 3825 - totalSize 2340 -#### A masked pattern was here #### - -# Storage Information -SerDe Library: 
org.apache.hadoop.hive.ql.io.orc.OrcSerde -InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat -OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat -Compressed: No -Num Buckets: -1 -Bucket Columns: [] -Sort Columns: [] -Storage Desc Params: - serialization.format 1 PREHOOK: query: select * from orc_merge5b PREHOOK: type: QUERY PREHOOK: Input: default@orc_merge5b Index: ql/src/test/results/clientpositive/tez/orc_merge_incompat2.q.out =================================================================== --- ql/src/test/results/clientpositive/tez/orc_merge_incompat2.q.out (revision 1637277) +++ ql/src/test/results/clientpositive/tez/orc_merge_incompat2.q.out (working copy) @@ -225,94 +225,10 @@ POSTHOOK: Input: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a@st=0.8 -PREHOOK: query: desc formatted orc_merge5a partition(st=80.0) -PREHOOK: type: DESCTABLE -PREHOOK: Input: default@orc_merge5a -POSTHOOK: query: desc formatted orc_merge5a partition(st=80.0) -POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@orc_merge5a -# col_name data_type comment - -userid bigint -string1 string -subtype double -decimal1 decimal(10,0) -ts timestamp - -# Partition Information -# col_name data_type comment - -st double - -# Detailed Partition Information -Partition Value: [80.0] -Database: default -Table: orc_merge5a +Found 4 items #### A masked pattern was here #### -Protect Mode: None +Found 4 items #### A masked pattern was here #### -Partition Parameters: - COLUMN_STATS_ACCURATE true - numFiles 4 - numRows 4 - rawDataSize 1020 - totalSize 2060 -#### A masked pattern was here #### - -# Storage Information -SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde -InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat -OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat -Compressed: No -Num Buckets: -1 -Bucket Columns: [] -Sort Columns: [] -Storage Desc Params: - serialization.format 1 -PREHOOK: query: desc formatted orc_merge5a partition(st=0.8) -PREHOOK: type: DESCTABLE -PREHOOK: Input: default@orc_merge5a -POSTHOOK: query: desc formatted orc_merge5a partition(st=0.8) -POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@orc_merge5a -# col_name data_type comment - -userid bigint -string1 string -subtype double -decimal1 decimal(10,0) -ts timestamp - -# Partition Information -# col_name data_type comment - -st double - -# Detailed Partition Information -Partition Value: [0.8] -Database: default -Table: orc_merge5a -#### A masked pattern was here #### -Protect Mode: None -#### A masked pattern was here #### -Partition Parameters: - COLUMN_STATS_ACCURATE true - numFiles 4 - numRows 8 - rawDataSize 2040 - totalSize 2188 -#### A masked pattern was here #### - -# Storage Information -SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde -InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat -OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat -Compressed: No -Num Buckets: -1 -Bucket Columns: [] -Sort Columns: [] -Storage Desc Params: - serialization.format 1 PREHOOK: query: show partitions orc_merge5a PREHOOK: type: SHOWPARTITIONS PREHOOK: Input: default@orc_merge5a @@ -414,94 +330,10 @@ POSTHOOK: Input: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a@st=0.8 -PREHOOK: query: desc formatted orc_merge5a partition(st=80.0) -PREHOOK: type: DESCTABLE -PREHOOK: Input: default@orc_merge5a -POSTHOOK: query: desc formatted orc_merge5a partition(st=80.0) -POSTHOOK: type: 
DESCTABLE -POSTHOOK: Input: default@orc_merge5a -# col_name data_type comment - -userid bigint -string1 string -subtype double -decimal1 decimal(10,0) -ts timestamp - -# Partition Information -# col_name data_type comment - -st double - -# Detailed Partition Information -Partition Value: [80.0] -Database: default -Table: orc_merge5a +Found 3 items #### A masked pattern was here #### -Protect Mode: None +Found 3 items #### A masked pattern was here #### -Partition Parameters: - COLUMN_STATS_ACCURATE true - numFiles 3 - numRows 4 - rawDataSize 1020 - totalSize 1819 -#### A masked pattern was here #### - -# Storage Information -SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde -InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat -OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat -Compressed: No -Num Buckets: -1 -Bucket Columns: [] -Sort Columns: [] -Storage Desc Params: - serialization.format 1 -PREHOOK: query: desc formatted orc_merge5a partition(st=0.8) -PREHOOK: type: DESCTABLE -PREHOOK: Input: default@orc_merge5a -POSTHOOK: query: desc formatted orc_merge5a partition(st=0.8) -POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@orc_merge5a -# col_name data_type comment - -userid bigint -string1 string -subtype double -decimal1 decimal(10,0) -ts timestamp - -# Partition Information -# col_name data_type comment - -st double - -# Detailed Partition Information -Partition Value: [0.8] -Database: default -Table: orc_merge5a -#### A masked pattern was here #### -Protect Mode: None -#### A masked pattern was here #### -Partition Parameters: - COLUMN_STATS_ACCURATE true - numFiles 3 - numRows 8 - rawDataSize 2040 - totalSize 1928 -#### A masked pattern was here #### - -# Storage Information -SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde -InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat -OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat -Compressed: No -Num Buckets: -1 -Bucket Columns: [] -Sort Columns: [] -Storage Desc Params: - serialization.format 1 PREHOOK: query: show partitions orc_merge5a PREHOOK: type: SHOWPARTITIONS PREHOOK: Input: default@orc_merge5a Index: ql/src/test/results/clientpositive/tez/ptf.q.out =================================================================== --- ql/src/test/results/clientpositive/tez/ptf.q.out (revision 1637277) +++ ql/src/test/results/clientpositive/tez/ptf.q.out (working copy) @@ -1,45 +1,3 @@ -PREHOOK: query: DROP TABLE part -PREHOOK: type: DROPTABLE -POSTHOOK: query: DROP TABLE part -POSTHOOK: type: DROPTABLE -PREHOOK: query: -- data setup -CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@part -POSTHOOK: query: -- data setup -CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@part -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part -PREHOOK: type: LOAD -#### A masked pattern was here #### -PREHOOK: Output: default@part -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part -POSTHOOK: type: LOAD -#### A masked pattern was here #### -POSTHOOK: Output: 
default@part PREHOOK: query: -- SORT_QUERY_RESULTS --1. test1 Index: ql/src/test/results/clientpositive/tez/subquery_in.q.out =================================================================== --- ql/src/test/results/clientpositive/tez/subquery_in.q.out (revision 1637277) +++ ql/src/test/results/clientpositive/tez/subquery_in.q.out (working copy) @@ -1,110 +1,14 @@ PREHOOK: query: -- SORT_QUERY_RESULTS -DROP TABLE part -PREHOOK: type: DROPTABLE -POSTHOOK: query: -- SORT_QUERY_RESULTS - -DROP TABLE part -POSTHOOK: type: DROPTABLE -PREHOOK: query: -- data setup -CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@part -POSTHOOK: query: -- data setup -CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@part -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part -PREHOOK: type: LOAD -#### A masked pattern was here #### -PREHOOK: Output: default@part -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part -POSTHOOK: type: LOAD -#### A masked pattern was here #### -POSTHOOK: Output: default@part -PREHOOK: query: DROP TABLE lineitem -PREHOOK: type: DROPTABLE -POSTHOOK: query: DROP TABLE lineitem -POSTHOOK: type: DROPTABLE -PREHOOK: query: CREATE TABLE lineitem (L_ORDERKEY INT, - L_PARTKEY INT, - L_SUPPKEY INT, - L_LINENUMBER INT, - L_QUANTITY DOUBLE, - L_EXTENDEDPRICE DOUBLE, - L_DISCOUNT DOUBLE, - L_TAX DOUBLE, - L_RETURNFLAG STRING, - L_LINESTATUS STRING, - l_shipdate STRING, - L_COMMITDATE STRING, - L_RECEIPTDATE STRING, - L_SHIPINSTRUCT STRING, - L_SHIPMODE STRING, - L_COMMENT STRING) -ROW FORMAT DELIMITED -FIELDS TERMINATED BY '|' -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@lineitem -POSTHOOK: query: CREATE TABLE lineitem (L_ORDERKEY INT, - L_PARTKEY INT, - L_SUPPKEY INT, - L_LINENUMBER INT, - L_QUANTITY DOUBLE, - L_EXTENDEDPRICE DOUBLE, - L_DISCOUNT DOUBLE, - L_TAX DOUBLE, - L_RETURNFLAG STRING, - L_LINESTATUS STRING, - l_shipdate STRING, - L_COMMITDATE STRING, - L_RECEIPTDATE STRING, - L_SHIPINSTRUCT STRING, - L_SHIPMODE STRING, - L_COMMENT STRING) -ROW FORMAT DELIMITED -FIELDS TERMINATED BY '|' -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@lineitem -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/lineitem.txt' OVERWRITE INTO TABLE lineitem -PREHOOK: type: LOAD -#### A masked pattern was here #### -PREHOOK: Output: default@lineitem -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/lineitem.txt' OVERWRITE INTO TABLE lineitem -POSTHOOK: type: LOAD -#### A masked pattern was here #### -POSTHOOK: Output: default@lineitem -PREHOOK: query: -- non agg, non corr +-- non agg, non corr explain select * from src where src.key in (select key from src s1 where s1.key > '9') PREHOOK: type: QUERY -POSTHOOK: query: -- non agg, non corr +POSTHOOK: query: -- SORT_QUERY_RESULTS + +-- non agg, non corr explain select * from src @@ -370,40 +274,40 @@ Map Operator Tree: TableScan alias: part - Statistics: Num rows: 30 Data size: 3173 Basic stats: COMPLETE Column stats: NONE 
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: p_mfgr (type: string), p_size (type: int) sort order: ++ Map-reduce partition columns: p_mfgr (type: string) - Statistics: Num rows: 30 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE value expressions: p_mfgr (type: string), p_size (type: int) Map 5 Map Operator Tree: TableScan alias: part - Statistics: Num rows: 30 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: UDFToDouble(p_size) is not null (type: boolean) - Statistics: Num rows: 15 Data size: 1586 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: UDFToDouble(p_size) (type: double) sort order: + Map-reduce partition columns: UDFToDouble(p_size) (type: double) - Statistics: Num rows: 15 Data size: 1586 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE value expressions: p_name (type: string), p_size (type: int) Reducer 2 Reduce Operator Tree: Extract - Statistics: Num rows: 30 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE PTF Operator - Statistics: Num rows: 30 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: (_wcol0 <= 2) (type: boolean) - Statistics: Num rows: 10 Data size: 1057 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 8 Data size: 968 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col5 (type: int) outputColumnNames: _col0 - Statistics: Num rows: 10 Data size: 1057 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 8 Data size: 968 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: avg(_col0) mode: hash @@ -446,14 +350,14 @@ 0 {VALUE._col1} {VALUE._col5} 1 outputColumnNames: _col1, _col5 - Statistics: Num rows: 16 Data size: 1744 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col1 (type: string), _col5 (type: int) outputColumnNames: _col0, _col1 - Statistics: Num rows: 16 Data size: 1744 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 16 Data size: 1744 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -522,26 +426,26 @@ Map Operator Tree: TableScan alias: b - Statistics: Num rows: 15 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: (p_size is not null and p_mfgr is not null) (type: boolean) - Statistics: Num rows: 4 Data size: 846 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE 
Reduce Output Operator key expressions: p_size (type: int), p_mfgr (type: string) sort order: ++ Map-reduce partition columns: p_size (type: int), p_mfgr (type: string) - Statistics: Num rows: 4 Data size: 846 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE value expressions: p_name (type: string) Map 3 Map Operator Tree: TableScan alias: part - Statistics: Num rows: 30 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: p_mfgr (type: string), p_size (type: int) sort order: ++ Map-reduce partition columns: p_mfgr (type: string) - Statistics: Num rows: 30 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE value expressions: p_mfgr (type: string), p_size (type: int) Reducer 2 Reduce Operator Tree: @@ -552,14 +456,14 @@ 0 {VALUE._col1} {KEY.reducesinkkey1} {KEY.reducesinkkey0} 1 outputColumnNames: _col1, _col2, _col5 - Statistics: Num rows: 4 Data size: 930 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 7 Data size: 931 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col2 (type: string), _col1 (type: string), _col5 (type: int) outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 4 Data size: 930 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 7 Data size: 931 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 4 Data size: 930 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 7 Data size: 931 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -567,27 +471,27 @@ Reducer 4 Reduce Operator Tree: Extract - Statistics: Num rows: 30 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE PTF Operator - Statistics: Num rows: 30 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: ((_wcol0 <= 2) and _col2 is not null) (type: boolean) - Statistics: Num rows: 5 Data size: 528 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 4 Data size: 484 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col2 (type: string), _col5 (type: int) outputColumnNames: _col0, _col1 - Statistics: Num rows: 5 Data size: 528 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 4 Data size: 484 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: min(_col1) keys: _col0 (type: string) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 5 Data size: 528 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 4 Data size: 484 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 5 Data size: 528 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 4 Data size: 484 Basic stats: COMPLETE Column stats: NONE value expressions: _col1 (type: int) Reducer 5 Reduce Operator Tree: @@ -596,24 +500,24 @@ keys: KEY._col0 (type: string) mode: mergepartial 
outputColumnNames: _col0, _col1 - Statistics: Num rows: 2 Data size: 211 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 242 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: _col1 is not null (type: boolean) - Statistics: Num rows: 2 Data size: 211 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 242 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col1 (type: int), _col0 (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 2 Data size: 211 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 242 Basic stats: COMPLETE Column stats: NONE Group By Operator keys: _col0 (type: int), _col1 (type: string) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 2 Data size: 211 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 242 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: int), _col1 (type: string) sort order: ++ Map-reduce partition columns: _col0 (type: int), _col1 (type: string) - Statistics: Num rows: 2 Data size: 211 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 242 Basic stats: COMPLETE Column stats: NONE Stage: Stage-0 Fetch Operator @@ -845,60 +749,60 @@ Map Operator Tree: TableScan alias: li - Statistics: Num rows: 756 Data size: 12099 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 100 Data size: 11999 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: ((l_partkey is not null and l_orderkey is not null) and (l_linenumber = 1)) (type: boolean) - Statistics: Num rows: 94 Data size: 1504 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 12 Data size: 1439 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: l_partkey (type: int) sort order: + Map-reduce partition columns: l_partkey (type: int) - Statistics: Num rows: 94 Data size: 1504 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 12 Data size: 1439 Basic stats: COMPLETE Column stats: NONE value expressions: l_orderkey (type: int), l_suppkey (type: int) Map 4 Map Operator Tree: TableScan alias: lineitem - Statistics: Num rows: 3024 Data size: 12099 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 100 Data size: 11999 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: l_partkey is not null (type: boolean) - Statistics: Num rows: 1512 Data size: 6049 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 50 Data size: 5999 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: l_partkey (type: int) outputColumnNames: l_partkey - Statistics: Num rows: 1512 Data size: 6049 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 50 Data size: 5999 Basic stats: COMPLETE Column stats: NONE Group By Operator keys: l_partkey (type: int) mode: hash outputColumnNames: _col0 - Statistics: Num rows: 1512 Data size: 6049 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 50 Data size: 5999 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: int) sort order: + Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 1512 Data size: 6049 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 50 Data size: 5999 Basic stats: COMPLETE Column stats: NONE Map 6 Map Operator Tree: TableScan alias: lineitem - Statistics: Num rows: 1728 Data size: 12099 Basic 
stats: COMPLETE Column stats: NONE + Statistics: Num rows: 100 Data size: 11999 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: ((l_shipmode = 'AIR') and l_orderkey is not null) (type: boolean) - Statistics: Num rows: 432 Data size: 3024 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 25 Data size: 2999 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: l_orderkey (type: int) outputColumnNames: _col0 - Statistics: Num rows: 432 Data size: 3024 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 25 Data size: 2999 Basic stats: COMPLETE Column stats: NONE Group By Operator keys: _col0 (type: int) mode: hash outputColumnNames: _col0 - Statistics: Num rows: 432 Data size: 3024 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 25 Data size: 2999 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: int) sort order: + Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 432 Data size: 3024 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 25 Data size: 2999 Basic stats: COMPLETE Column stats: NONE Reducer 2 Reduce Operator Tree: Merge Join Operator @@ -908,12 +812,12 @@ 0 {KEY.reducesinkkey0} 1 {VALUE._col0} {VALUE._col1} outputColumnNames: _col0, _col1, _col3 - Statistics: Num rows: 831 Data size: 3326 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 27 Data size: 3298 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col1 (type: int) sort order: + Map-reduce partition columns: _col1 (type: int) - Statistics: Num rows: 831 Data size: 3326 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 27 Data size: 3298 Basic stats: COMPLETE Column stats: NONE value expressions: _col0 (type: int), _col3 (type: int) Reducer 3 Reduce Operator Tree: @@ -924,14 +828,14 @@ 0 {VALUE._col0} {VALUE._col2} 1 outputColumnNames: _col0, _col3 - Statistics: Num rows: 914 Data size: 3658 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 29 Data size: 3627 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: int), _col3 (type: int) outputColumnNames: _col0, _col1 - Statistics: Num rows: 914 Data size: 3658 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 29 Data size: 3627 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 914 Data size: 3658 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 29 Data size: 3627 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -942,16 +846,16 @@ keys: KEY._col0 (type: int) mode: mergepartial outputColumnNames: _col0 - Statistics: Num rows: 756 Data size: 3024 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 25 Data size: 2999 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: int) outputColumnNames: _col0 - Statistics: Num rows: 756 Data size: 3024 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 25 Data size: 2999 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: int) sort order: + Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 756 Data size: 3024 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 25 Data size: 2999 Basic stats: COMPLETE Column stats: NONE Stage: Stage-0 
Fetch Operator Index: ql/src/test/results/clientpositive/tez/temp_table.q.out =================================================================== --- ql/src/test/results/clientpositive/tez/temp_table.q.out (revision 1637277) +++ ql/src/test/results/clientpositive/tez/temp_table.q.out (working copy) @@ -353,9 +353,15 @@ bar bay baz +cbo_t1 +cbo_t2 +cbo_t3 foo +lineitem +part src src1 +src_cbo src_json src_sequencefile src_thrift Index: ql/src/test/results/clientpositive/tez/union5.q.out =================================================================== --- ql/src/test/results/clientpositive/tez/union5.q.out (revision 1637277) +++ ql/src/test/results/clientpositive/tez/union5.q.out (working copy) @@ -88,14 +88,14 @@ keys: KEY._col0 (type: string) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: string), _col1 (type: bigint) outputColumnNames: _col0, _col1 - Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat Index: ql/src/test/results/clientpositive/tez/union7.q.out =================================================================== --- ql/src/test/results/clientpositive/tez/union7.q.out (revision 1637277) +++ ql/src/test/results/clientpositive/tez/union7.q.out (working copy) @@ -92,14 +92,14 @@ keys: KEY._col0 (type: string) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: PARTIAL Select Operator expressions: _col0 (type: string), _col1 (type: bigint) outputColumnNames: _col0, _col1 - Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: PARTIAL File Output Operator compressed: false - Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: PARTIAL table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat Index: ql/src/test/results/clientpositive/tez/vector_aggregate_9.q.out =================================================================== --- ql/src/test/results/clientpositive/tez/vector_aggregate_9.q.out (revision 0) +++ ql/src/test/results/clientpositive/tez/vector_aggregate_9.q.out (working copy) @@ -0,0 +1,173 @@ +PREHOOK: query: create table vectortab2k( + t tinyint, + si smallint, + i int, + b bigint, + f float, + d double, + dc decimal(38,18), + bo boolean, + s string, + s2 string, + ts timestamp, + ts2 timestamp, + dt date) +ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' +STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@vectortab2k +POSTHOOK: query: create table vectortab2k( + t tinyint, + si smallint, + i int, + b 
bigint, + f float, + d double, + dc decimal(38,18), + bo boolean, + s string, + s2 string, + ts timestamp, + ts2 timestamp, + dt date) +ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' +STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@vectortab2k +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/vectortab2k' OVERWRITE INTO TABLE vectortab2k +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@vectortab2k +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/vectortab2k' OVERWRITE INTO TABLE vectortab2k +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@vectortab2k +PREHOOK: query: create table vectortab2korc( + t tinyint, + si smallint, + i int, + b bigint, + f float, + d double, + dc decimal(38,18), + bo boolean, + s string, + s2 string, + ts timestamp, + ts2 timestamp, + dt date) +STORED AS ORC +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@vectortab2korc +POSTHOOK: query: create table vectortab2korc( + t tinyint, + si smallint, + i int, + b bigint, + f float, + d double, + dc decimal(38,18), + bo boolean, + s string, + s2 string, + ts timestamp, + ts2 timestamp, + dt date) +STORED AS ORC +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@vectortab2korc +PREHOOK: query: INSERT INTO TABLE vectortab2korc SELECT * FROM vectortab2k +PREHOOK: type: QUERY +PREHOOK: Input: default@vectortab2k +PREHOOK: Output: default@vectortab2korc +POSTHOOK: query: INSERT INTO TABLE vectortab2korc SELECT * FROM vectortab2k +POSTHOOK: type: QUERY +POSTHOOK: Input: default@vectortab2k +POSTHOOK: Output: default@vectortab2korc +POSTHOOK: Lineage: vectortab2korc.b SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:b, type:bigint, comment:null), ] +POSTHOOK: Lineage: vectortab2korc.bo SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:bo, type:boolean, comment:null), ] +POSTHOOK: Lineage: vectortab2korc.d SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:d, type:double, comment:null), ] +POSTHOOK: Lineage: vectortab2korc.dc SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:dc, type:decimal(38,18), comment:null), ] +POSTHOOK: Lineage: vectortab2korc.dt SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:dt, type:date, comment:null), ] +POSTHOOK: Lineage: vectortab2korc.f SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:f, type:float, comment:null), ] +POSTHOOK: Lineage: vectortab2korc.i SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:i, type:int, comment:null), ] +POSTHOOK: Lineage: vectortab2korc.s SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:s, type:string, comment:null), ] +POSTHOOK: Lineage: vectortab2korc.s2 SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:s2, type:string, comment:null), ] +POSTHOOK: Lineage: vectortab2korc.si SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:si, type:smallint, comment:null), ] +POSTHOOK: Lineage: vectortab2korc.t SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:t, type:tinyint, comment:null), ] +POSTHOOK: Lineage: vectortab2korc.ts SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:ts, type:timestamp, comment:null), ] +POSTHOOK: Lineage: vectortab2korc.ts2 SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:ts2, type:timestamp, comment:null), ] +PREHOOK: query: explain +select min(dc), max(dc), sum(dc), avg(dc) from vectortab2korc +PREHOOK: type: QUERY +POSTHOOK: query: explain +select min(dc), max(dc), sum(dc), avg(dc) from vectortab2korc +POSTHOOK: type: 
QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: vectortab2korc + Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: dc (type: decimal(38,18)) + outputColumnNames: dc + Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: min(dc), max(dc), sum(dc), avg(dc) + mode: hash + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 1 Data size: 336 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 336 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: decimal(38,18)), _col1 (type: decimal(38,18)), _col2 (type: decimal(38,18)), _col3 (type: struct) + Execution mode: vectorized + Reducer 2 + Reduce Operator Tree: + Group By Operator + aggregations: min(VALUE._col0), max(VALUE._col1), sum(VALUE._col2), avg(VALUE._col3) + mode: mergepartial + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 1 Data size: 448 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: decimal(38,18)), _col1 (type: decimal(38,18)), _col2 (type: decimal(38,18)), _col3 (type: decimal(38,18)) + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 1 Data size: 448 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 448 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select min(dc), max(dc), sum(dc), avg(dc) from vectortab2korc +PREHOOK: type: QUERY +PREHOOK: Input: default@vectortab2korc +#### A masked pattern was here #### +POSTHOOK: query: select min(dc), max(dc), sum(dc), avg(dc) from vectortab2korc +POSTHOOK: type: QUERY +POSTHOOK: Input: default@vectortab2korc +#### A masked pattern was here #### +-4997414117561.546875 4994550248722.298828 -10252745435816.024410 -5399023399.587163986308583465 Index: ql/src/test/results/clientpositive/tez/vector_between_in.q.out =================================================================== --- ql/src/test/results/clientpositive/tez/vector_between_in.q.out (revision 1637277) +++ ql/src/test/results/clientpositive/tez/vector_between_in.q.out (working copy) @@ -718,15 +718,15 @@ 14.9324324324 19.1135135135 20.3081081081 -22.1000000000 +22.1 24.4891891892 33.4486486486 34.6432432432 40.0189189189 42.4081081081 43.0054054054 -44.2000000000 -44.2000000000 +44.2 +44.2 44.7972972973 45.9918918919 PREHOOK: query: SELECT COUNT(*) FROM decimal_date_test WHERE cdecimal1 NOT BETWEEN -2000 AND 4390.1351351351 Index: ql/src/test/results/clientpositive/tez/vector_coalesce.q.out =================================================================== --- ql/src/test/results/clientpositive/tez/vector_coalesce.q.out (revision 0) +++ ql/src/test/results/clientpositive/tez/vector_coalesce.q.out (working copy) @@ -0,0 +1,196 @@ +PREHOOK: query: EXPLAIN SELECT cdouble, cstring1, cint, cfloat, csmallint, coalesce(cdouble, cstring1, 
cint, cfloat, csmallint) +FROM alltypesorc +WHERE (cdouble IS NULL) LIMIT 10 +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN SELECT cdouble, cstring1, cint, cfloat, csmallint, coalesce(cdouble, cstring1, cint, cfloat, csmallint) +FROM alltypesorc +WHERE (cdouble IS NULL) LIMIT 10 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: 10 + Processor Tree: + TableScan + alias: alltypesorc + Filter Operator + predicate: cdouble is null (type: boolean) + Select Operator + expressions: null (type: void), cstring1 (type: string), cint (type: int), cfloat (type: float), csmallint (type: smallint), COALESCE(null,cstring1,cint,cfloat,csmallint) (type: string) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 + Limit + Number of rows: 10 + ListSink + +PREHOOK: query: SELECT cdouble, cstring1, cint, cfloat, csmallint, coalesce(cdouble, cstring1, cint, cfloat, csmallint) +FROM alltypesorc +WHERE (cdouble IS NULL) LIMIT 10 +PREHOOK: type: QUERY +PREHOOK: Input: default@alltypesorc +#### A masked pattern was here #### +POSTHOOK: query: SELECT cdouble, cstring1, cint, cfloat, csmallint, coalesce(cdouble, cstring1, cint, cfloat, csmallint) +FROM alltypesorc +WHERE (cdouble IS NULL) LIMIT 10 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@alltypesorc +#### A masked pattern was here #### +NULL LFgU5WT87C2yJ4W4YU0r8Pp -285355633 -51.0 NULL LFgU5WT87C2yJ4W4YU0r8Pp +NULL 75bFXC7TqGo1SEaYAx4C58m NULL -51.0 NULL 75bFXC7TqGo1SEaYAx4C58m +NULL v3p153e2bSkGS70v04G 354670578 -51.0 NULL v3p153e2bSkGS70v04G +NULL 0pOH7A4O8aQ37NuBqn 951003458 -51.0 NULL 0pOH7A4O8aQ37NuBqn +NULL 8ShAFcD734S8Q26WjMwpq0Q 164554497 -51.0 NULL 8ShAFcD734S8Q26WjMwpq0Q +NULL nOF31ehjY7ULCHMf 455419170 -51.0 NULL nOF31ehjY7ULCHMf +NULL t32s57Cjt4a250qQgVNAB5T -109813638 -51.0 NULL t32s57Cjt4a250qQgVNAB5T +NULL nvO822k30OaH37Il 665801232 -51.0 NULL nvO822k30OaH37Il +NULL M152O -601502867 -51.0 NULL M152O +NULL FgJ7Hft6845s1766oyt82q 199879534 -51.0 NULL FgJ7Hft6845s1766oyt82q +PREHOOK: query: EXPLAIN SELECT ctinyint, cdouble, cint, coalesce(ctinyint+10, (cdouble+log2(cint)), 0) +FROM alltypesorc +WHERE (ctinyint IS NULL) LIMIT 10 +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN SELECT ctinyint, cdouble, cint, coalesce(ctinyint+10, (cdouble+log2(cint)), 0) +FROM alltypesorc +WHERE (ctinyint IS NULL) LIMIT 10 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: 10 + Processor Tree: + TableScan + alias: alltypesorc + Filter Operator + predicate: ctinyint is null (type: boolean) + Select Operator + expressions: ctinyint (type: tinyint), cdouble (type: double), cint (type: int), COALESCE((ctinyint + 10),(cdouble + log2(cint)),0) (type: double) + outputColumnNames: _col0, _col1, _col2, _col3 + Limit + Number of rows: 10 + ListSink + +PREHOOK: query: SELECT ctinyint, cdouble, cint, coalesce(ctinyint+10, (cdouble+log2(cint)), 0) +FROM alltypesorc +WHERE (ctinyint IS NULL) LIMIT 10 +PREHOOK: type: QUERY +PREHOOK: Input: default@alltypesorc +#### A masked pattern was here #### +POSTHOOK: query: SELECT ctinyint, cdouble, cint, coalesce(ctinyint+10, (cdouble+log2(cint)), 0) +FROM alltypesorc +WHERE (ctinyint IS NULL) LIMIT 10 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@alltypesorc +#### A masked pattern was here #### +NULL -4213.0 528534767 -4184.022576865738 +NULL -3012.0 528534767 -2983.0225768657383 +NULL -4016.0 528534767 -3987.0225768657383 +NULL -11534.0 528534767 -11505.022576865738 
+NULL -6147.0 528534767 -6118.022576865738 +NULL -7680.0 528534767 -7651.022576865738 +NULL -7314.0 528534767 -7285.022576865738 +NULL 11254.0 528534767 11282.977423134262 +NULL 13889.0 528534767 13917.977423134262 +NULL 3321.0 528534767 3349.9774231342617 +PREHOOK: query: EXPLAIN SELECT cfloat, cbigint, coalesce(cfloat, cbigint, 0) +FROM alltypesorc +WHERE (cfloat IS NULL AND cbigint IS NULL) LIMIT 10 +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN SELECT cfloat, cbigint, coalesce(cfloat, cbigint, 0) +FROM alltypesorc +WHERE (cfloat IS NULL AND cbigint IS NULL) LIMIT 10 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: 10 + Processor Tree: + TableScan + alias: alltypesorc + Filter Operator + predicate: (cfloat is null and cbigint is null) (type: boolean) + Select Operator + expressions: null (type: void), null (type: void), COALESCE(null,null,0) (type: float) + outputColumnNames: _col0, _col1, _col2 + Limit + Number of rows: 10 + ListSink + +PREHOOK: query: SELECT cfloat, cbigint, coalesce(cfloat, cbigint, 0) +FROM alltypesorc +WHERE (cfloat IS NULL AND cbigint IS NULL) LIMIT 10 +PREHOOK: type: QUERY +PREHOOK: Input: default@alltypesorc +#### A masked pattern was here #### +POSTHOOK: query: SELECT cfloat, cbigint, coalesce(cfloat, cbigint, 0) +FROM alltypesorc +WHERE (cfloat IS NULL AND cbigint IS NULL) LIMIT 10 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@alltypesorc +#### A masked pattern was here #### +NULL NULL 0 +NULL NULL 0 +NULL NULL 0 +NULL NULL 0 +NULL NULL 0 +NULL NULL 0 +NULL NULL 0 +NULL NULL 0 +NULL NULL 0 +NULL NULL 0 +PREHOOK: query: EXPLAIN SELECT ctimestamp1, ctimestamp2, coalesce(ctimestamp1, ctimestamp2) +FROM alltypesorc +WHERE ctimestamp1 IS NOT NULL OR ctimestamp2 IS NOT NULL LIMIT 10 +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN SELECT ctimestamp1, ctimestamp2, coalesce(ctimestamp1, ctimestamp2) +FROM alltypesorc +WHERE ctimestamp1 IS NOT NULL OR ctimestamp2 IS NOT NULL LIMIT 10 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: 10 + Processor Tree: + TableScan + alias: alltypesorc + Filter Operator + predicate: (ctimestamp1 is not null or ctimestamp2 is not null) (type: boolean) + Select Operator + expressions: ctimestamp1 (type: timestamp), ctimestamp2 (type: timestamp), COALESCE(ctimestamp1,ctimestamp2) (type: timestamp) + outputColumnNames: _col0, _col1, _col2 + Limit + Number of rows: 10 + ListSink + +PREHOOK: query: SELECT ctimestamp1, ctimestamp2, coalesce(ctimestamp1, ctimestamp2) +FROM alltypesorc +WHERE ctimestamp1 IS NOT NULL OR ctimestamp2 IS NOT NULL LIMIT 10 +PREHOOK: type: QUERY +PREHOOK: Input: default@alltypesorc +#### A masked pattern was here #### +POSTHOOK: query: SELECT ctimestamp1, ctimestamp2, coalesce(ctimestamp1, ctimestamp2) +FROM alltypesorc +WHERE ctimestamp1 IS NOT NULL OR ctimestamp2 IS NOT NULL LIMIT 10 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@alltypesorc +#### A masked pattern was here #### +1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 1969-12-31 15:59:46.674 +NULL 1969-12-31 16:00:13.589 1969-12-31 16:00:13.589 +1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 1969-12-31 15:59:55.787 +1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 1969-12-31 15:59:44.187 +1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 1969-12-31 15:59:50.434 +1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 1969-12-31 16:00:15.007 +1969-12-31 16:00:07.021 1969-12-31 16:00:02.997 1969-12-31 
16:00:07.021 +1969-12-31 16:00:04.963 1969-12-31 15:59:56.474 1969-12-31 16:00:04.963 +1969-12-31 15:59:52.176 1969-12-31 16:00:07.787 1969-12-31 15:59:52.176 +1969-12-31 15:59:44.569 1969-12-31 15:59:51.665 1969-12-31 15:59:44.569 Index: ql/src/test/results/clientpositive/tez/vector_decimal_1.q.out =================================================================== --- ql/src/test/results/clientpositive/tez/vector_decimal_1.q.out (revision 0) +++ ql/src/test/results/clientpositive/tez/vector_decimal_1.q.out (working copy) @@ -0,0 +1,591 @@ +PREHOOK: query: drop table if exists decimal_1 +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table if exists decimal_1 +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table decimal_1 (t decimal(4,2), u decimal(5), v decimal) stored as orc +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@decimal_1 +POSTHOOK: query: create table decimal_1 (t decimal(4,2), u decimal(5), v decimal) stored as orc +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@decimal_1 +PREHOOK: query: desc decimal_1 +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@decimal_1 +POSTHOOK: query: desc decimal_1 +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@decimal_1 +t decimal(4,2) +u decimal(5,0) +v decimal(10,0) +PREHOOK: query: insert overwrite table decimal_1 + select cast('17.29' as decimal(4,2)), 3.1415926BD, 3115926.54321BD from src tablesample (1 rows) +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@decimal_1 +POSTHOOK: query: insert overwrite table decimal_1 + select cast('17.29' as decimal(4,2)), 3.1415926BD, 3115926.54321BD from src tablesample (1 rows) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@decimal_1 +POSTHOOK: Lineage: decimal_1.t EXPRESSION [] +POSTHOOK: Lineage: decimal_1.u EXPRESSION [] +POSTHOOK: Lineage: decimal_1.v EXPRESSION [] +PREHOOK: query: explain +select cast(t as boolean) from decimal_1 order by t +PREHOOK: type: QUERY +POSTHOOK: query: explain +select cast(t as boolean) from decimal_1 order by t +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: decimal_1 + Statistics: Num rows: 1 Data size: 336 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: UDFToBoolean(t) (type: boolean) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 336 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: boolean) + sort order: + + Statistics: Num rows: 1 Data size: 336 Basic stats: COMPLETE Column stats: NONE + Execution mode: vectorized + Reducer 2 + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: boolean) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 336 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 336 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select cast(t as 
boolean) from decimal_1 order by t +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_1 +#### A masked pattern was here #### +POSTHOOK: query: select cast(t as boolean) from decimal_1 order by t +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_1 +#### A masked pattern was here #### +true +PREHOOK: query: explain +select cast(t as tinyint) from decimal_1 order by t +PREHOOK: type: QUERY +POSTHOOK: query: explain +select cast(t as tinyint) from decimal_1 order by t +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: decimal_1 + Statistics: Num rows: 1 Data size: 336 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: UDFToByte(t) (type: tinyint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 336 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: tinyint) + sort order: + + Statistics: Num rows: 1 Data size: 336 Basic stats: COMPLETE Column stats: NONE + Execution mode: vectorized + Reducer 2 + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: tinyint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 336 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 336 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select cast(t as tinyint) from decimal_1 order by t +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_1 +#### A masked pattern was here #### +POSTHOOK: query: select cast(t as tinyint) from decimal_1 order by t +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_1 +#### A masked pattern was here #### +17 +PREHOOK: query: explain +select cast(t as smallint) from decimal_1 order by t +PREHOOK: type: QUERY +POSTHOOK: query: explain +select cast(t as smallint) from decimal_1 order by t +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: decimal_1 + Statistics: Num rows: 1 Data size: 336 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: UDFToShort(t) (type: smallint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 336 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: smallint) + sort order: + + Statistics: Num rows: 1 Data size: 336 Basic stats: COMPLETE Column stats: NONE + Execution mode: vectorized + Reducer 2 + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: smallint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 336 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 336 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output 
format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select cast(t as smallint) from decimal_1 order by t +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_1 +#### A masked pattern was here #### +POSTHOOK: query: select cast(t as smallint) from decimal_1 order by t +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_1 +#### A masked pattern was here #### +17 +PREHOOK: query: explain +select cast(t as int) from decimal_1 order by t +PREHOOK: type: QUERY +POSTHOOK: query: explain +select cast(t as int) from decimal_1 order by t +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: decimal_1 + Statistics: Num rows: 1 Data size: 336 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: UDFToInteger(t) (type: int) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 336 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: int) + sort order: + + Statistics: Num rows: 1 Data size: 336 Basic stats: COMPLETE Column stats: NONE + Execution mode: vectorized + Reducer 2 + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: int) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 336 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 336 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select cast(t as int) from decimal_1 order by t +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_1 +#### A masked pattern was here #### +POSTHOOK: query: select cast(t as int) from decimal_1 order by t +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_1 +#### A masked pattern was here #### +17 +PREHOOK: query: explain +select cast(t as bigint) from decimal_1 order by t +PREHOOK: type: QUERY +POSTHOOK: query: explain +select cast(t as bigint) from decimal_1 order by t +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: decimal_1 + Statistics: Num rows: 1 Data size: 336 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: UDFToLong(t) (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 336 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: bigint) + sort order: + + Statistics: Num rows: 1 Data size: 336 Basic stats: COMPLETE Column stats: NONE + Execution mode: vectorized + Reducer 2 + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 336 Basic stats: 
COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 336 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select cast(t as bigint) from decimal_1 order by t +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_1 +#### A masked pattern was here #### +POSTHOOK: query: select cast(t as bigint) from decimal_1 order by t +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_1 +#### A masked pattern was here #### +17 +PREHOOK: query: explain +select cast(t as float) from decimal_1 order by t +PREHOOK: type: QUERY +POSTHOOK: query: explain +select cast(t as float) from decimal_1 order by t +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: decimal_1 + Statistics: Num rows: 1 Data size: 336 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: UDFToFloat(t) (type: float) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 336 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: float) + sort order: + + Statistics: Num rows: 1 Data size: 336 Basic stats: COMPLETE Column stats: NONE + Execution mode: vectorized + Reducer 2 + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: float) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 336 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 336 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select cast(t as float) from decimal_1 order by t +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_1 +#### A masked pattern was here #### +POSTHOOK: query: select cast(t as float) from decimal_1 order by t +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_1 +#### A masked pattern was here #### +17.29 +PREHOOK: query: explain +select cast(t as double) from decimal_1 order by t +PREHOOK: type: QUERY +POSTHOOK: query: explain +select cast(t as double) from decimal_1 order by t +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: decimal_1 + Statistics: Num rows: 1 Data size: 336 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: UDFToDouble(t) (type: double) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 336 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: double) + sort order: + + Statistics: Num rows: 1 Data size: 336 Basic stats: 
COMPLETE Column stats: NONE + Execution mode: vectorized + Reducer 2 + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: double) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 336 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 336 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select cast(t as double) from decimal_1 order by t +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_1 +#### A masked pattern was here #### +POSTHOOK: query: select cast(t as double) from decimal_1 order by t +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_1 +#### A masked pattern was here #### +17.29 +PREHOOK: query: explain +select cast(t as string) from decimal_1 order by t +PREHOOK: type: QUERY +POSTHOOK: query: explain +select cast(t as string) from decimal_1 order by t +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: decimal_1 + Statistics: Num rows: 1 Data size: 336 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: UDFToString(t) (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 336 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: string) + sort order: + + Statistics: Num rows: 1 Data size: 336 Basic stats: COMPLETE Column stats: NONE + Execution mode: vectorized + Reducer 2 + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 336 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 336 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select cast(t as string) from decimal_1 order by t +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_1 +#### A masked pattern was here #### +POSTHOOK: query: select cast(t as string) from decimal_1 order by t +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_1 +#### A masked pattern was here #### +17.29 +PREHOOK: query: explain +select cast(t as timestamp) from decimal_1 order by t +PREHOOK: type: QUERY +POSTHOOK: query: explain +select cast(t as timestamp) from decimal_1 order by t +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: decimal_1 + Statistics: Num rows: 1 Data size: 336 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: CAST( t AS 
TIMESTAMP) (type: timestamp) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 336 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: timestamp) + sort order: + + Statistics: Num rows: 1 Data size: 336 Basic stats: COMPLETE Column stats: NONE + Execution mode: vectorized + Reducer 2 + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: timestamp) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 336 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 336 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select cast(t as timestamp) from decimal_1 order by t +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_1 +#### A masked pattern was here #### +POSTHOOK: query: select cast(t as timestamp) from decimal_1 order by t +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_1 +#### A masked pattern was here #### +1969-12-31 16:00:17.29 +PREHOOK: query: drop table decimal_1 +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@decimal_1 +PREHOOK: Output: default@decimal_1 +POSTHOOK: query: drop table decimal_1 +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@decimal_1 +POSTHOOK: Output: default@decimal_1 Index: ql/src/test/results/clientpositive/tez/vector_decimal_10_0.q.out =================================================================== --- ql/src/test/results/clientpositive/tez/vector_decimal_10_0.q.out (revision 0) +++ ql/src/test/results/clientpositive/tez/vector_decimal_10_0.q.out (working copy) @@ -0,0 +1,112 @@ +PREHOOK: query: DROP TABLE IF EXISTS decimal_txt +PREHOOK: type: DROPTABLE +POSTHOOK: query: DROP TABLE IF EXISTS decimal_txt +POSTHOOK: type: DROPTABLE +PREHOOK: query: DROP TABLE IF EXISTS decimal +PREHOOK: type: DROPTABLE +POSTHOOK: query: DROP TABLE IF EXISTS decimal +POSTHOOK: type: DROPTABLE +PREHOOK: query: CREATE TABLE decimal_txt (dec decimal) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@decimal_txt +POSTHOOK: query: CREATE TABLE decimal_txt (dec decimal) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@decimal_txt +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/decimal_10_0.txt' OVERWRITE INTO TABLE decimal_txt +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@decimal_txt +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/decimal_10_0.txt' OVERWRITE INTO TABLE decimal_txt +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@decimal_txt +PREHOOK: query: CREATE TABLE DECIMAL STORED AS ORC AS SELECT * FROM decimal_txt +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: default@decimal_txt +PREHOOK: Output: database:default +PREHOOK: Output: default@DECIMAL +POSTHOOK: query: CREATE TABLE DECIMAL STORED AS ORC AS SELECT * FROM decimal_txt +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: default@decimal_txt +POSTHOOK: Output: database:default +POSTHOOK: Output: default@DECIMAL +PREHOOK: query: EXPLAIN +SELECT dec FROM DECIMAL order by dec +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN +SELECT dec FROM 
DECIMAL order by dec +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: decimal + Statistics: Num rows: 1 Data size: 219 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: dec (type: decimal(10,0)) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 219 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: decimal(10,0)) + sort order: + + Statistics: Num rows: 1 Data size: 219 Basic stats: COMPLETE Column stats: NONE + Execution mode: vectorized + Reducer 2 + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: decimal(10,0)) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 219 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 219 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT dec FROM DECIMAL order by dec +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal +#### A masked pattern was here #### +POSTHOOK: query: SELECT dec FROM DECIMAL order by dec +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal +#### A masked pattern was here #### +NULL +1000000000 +PREHOOK: query: DROP TABLE DECIMAL_txt +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@decimal_txt +PREHOOK: Output: default@decimal_txt +POSTHOOK: query: DROP TABLE DECIMAL_txt +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@decimal_txt +POSTHOOK: Output: default@decimal_txt +PREHOOK: query: DROP TABLE DECIMAL +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@decimal +PREHOOK: Output: default@decimal +POSTHOOK: query: DROP TABLE DECIMAL +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@decimal +POSTHOOK: Output: default@decimal Index: ql/src/test/results/clientpositive/tez/vector_decimal_2.q.out =================================================================== --- ql/src/test/results/clientpositive/tez/vector_decimal_2.q.out (revision 0) +++ ql/src/test/results/clientpositive/tez/vector_decimal_2.q.out (working copy) @@ -0,0 +1,1676 @@ +PREHOOK: query: drop table decimal_2 +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table decimal_2 +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table decimal_2 (t decimal(18,9)) stored as orc +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@decimal_2 +POSTHOOK: query: create table decimal_2 (t decimal(18,9)) stored as orc +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@decimal_2 +PREHOOK: query: insert overwrite table decimal_2 + select cast('17.29' as decimal(4,2)) from src tablesample (1 rows) +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@decimal_2 +POSTHOOK: query: insert overwrite table decimal_2 + select cast('17.29' as decimal(4,2)) from src tablesample (1 rows) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@decimal_2 +POSTHOOK: Lineage: decimal_2.t EXPRESSION [] +PREHOOK: query: 
explain +select cast(t as boolean) from decimal_2 order by t +PREHOOK: type: QUERY +POSTHOOK: query: explain +select cast(t as boolean) from decimal_2 order by t +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: decimal_2 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: UDFToBoolean(t) (type: boolean) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: boolean) + sort order: + + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + Execution mode: vectorized + Reducer 2 + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: boolean) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select cast(t as boolean) from decimal_2 order by t +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_2 +#### A masked pattern was here #### +POSTHOOK: query: select cast(t as boolean) from decimal_2 order by t +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_2 +#### A masked pattern was here #### +true +PREHOOK: query: explain +select cast(t as tinyint) from decimal_2 order by t +PREHOOK: type: QUERY +POSTHOOK: query: explain +select cast(t as tinyint) from decimal_2 order by t +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: decimal_2 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: UDFToByte(t) (type: tinyint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: tinyint) + sort order: + + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + Execution mode: vectorized + Reducer 2 + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: tinyint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select cast(t as tinyint) from decimal_2 order by t +PREHOOK: type: 
QUERY +PREHOOK: Input: default@decimal_2 +#### A masked pattern was here #### +POSTHOOK: query: select cast(t as tinyint) from decimal_2 order by t +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_2 +#### A masked pattern was here #### +17 +PREHOOK: query: explain +select cast(t as smallint) from decimal_2 order by t +PREHOOK: type: QUERY +POSTHOOK: query: explain +select cast(t as smallint) from decimal_2 order by t +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: decimal_2 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: UDFToShort(t) (type: smallint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: smallint) + sort order: + + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + Execution mode: vectorized + Reducer 2 + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: smallint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select cast(t as smallint) from decimal_2 order by t +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_2 +#### A masked pattern was here #### +POSTHOOK: query: select cast(t as smallint) from decimal_2 order by t +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_2 +#### A masked pattern was here #### +17 +PREHOOK: query: explain +select cast(t as int) from decimal_2 order by t +PREHOOK: type: QUERY +POSTHOOK: query: explain +select cast(t as int) from decimal_2 order by t +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: decimal_2 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: UDFToInteger(t) (type: int) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: int) + sort order: + + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + Execution mode: vectorized + Reducer 2 + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: int) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + 
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select cast(t as int) from decimal_2 order by t +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_2 +#### A masked pattern was here #### +POSTHOOK: query: select cast(t as int) from decimal_2 order by t +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_2 +#### A masked pattern was here #### +17 +PREHOOK: query: explain +select cast(t as bigint) from decimal_2 order by t +PREHOOK: type: QUERY +POSTHOOK: query: explain +select cast(t as bigint) from decimal_2 order by t +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: decimal_2 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: UDFToLong(t) (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: bigint) + sort order: + + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + Execution mode: vectorized + Reducer 2 + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select cast(t as bigint) from decimal_2 order by t +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_2 +#### A masked pattern was here #### +POSTHOOK: query: select cast(t as bigint) from decimal_2 order by t +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_2 +#### A masked pattern was here #### +17 +PREHOOK: query: explain +select cast(t as float) from decimal_2 order by t +PREHOOK: type: QUERY +POSTHOOK: query: explain +select cast(t as float) from decimal_2 order by t +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: decimal_2 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: UDFToFloat(t) (type: float) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: float) + sort order: + + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + Execution mode: vectorized + Reducer 2 + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: float) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: 
false + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select cast(t as float) from decimal_2 order by t +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_2 +#### A masked pattern was here #### +POSTHOOK: query: select cast(t as float) from decimal_2 order by t +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_2 +#### A masked pattern was here #### +17.29 +PREHOOK: query: explain +select cast(t as double) from decimal_2 order by t +PREHOOK: type: QUERY +POSTHOOK: query: explain +select cast(t as double) from decimal_2 order by t +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: decimal_2 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: UDFToDouble(t) (type: double) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: double) + sort order: + + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + Execution mode: vectorized + Reducer 2 + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: double) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select cast(t as double) from decimal_2 order by t +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_2 +#### A masked pattern was here #### +POSTHOOK: query: select cast(t as double) from decimal_2 order by t +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_2 +#### A masked pattern was here #### +17.29 +PREHOOK: query: explain +select cast(t as string) from decimal_2 order by t +PREHOOK: type: QUERY +POSTHOOK: query: explain +select cast(t as string) from decimal_2 order by t +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: decimal_2 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: UDFToString(t) (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: string) + sort order: + + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + Execution mode: vectorized 
+ Reducer 2 + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select cast(t as string) from decimal_2 order by t +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_2 +#### A masked pattern was here #### +POSTHOOK: query: select cast(t as string) from decimal_2 order by t +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_2 +#### A masked pattern was here #### +17.29 +PREHOOK: query: insert overwrite table decimal_2 + select cast('3404045.5044003' as decimal(18,9)) from src tablesample (1 rows) +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@decimal_2 +POSTHOOK: query: insert overwrite table decimal_2 + select cast('3404045.5044003' as decimal(18,9)) from src tablesample (1 rows) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@decimal_2 +POSTHOOK: Lineage: decimal_2.t EXPRESSION [] +PREHOOK: query: explain +select cast(t as boolean) from decimal_2 order by t +PREHOOK: type: QUERY +POSTHOOK: query: explain +select cast(t as boolean) from decimal_2 order by t +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: decimal_2 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: UDFToBoolean(t) (type: boolean) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: boolean) + sort order: + + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + Execution mode: vectorized + Reducer 2 + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: boolean) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select cast(t as boolean) from decimal_2 order by t +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_2 +#### A masked pattern was here #### +POSTHOOK: query: select cast(t as boolean) from decimal_2 order by t +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_2 +#### A masked pattern was here #### +true +PREHOOK: query: explain +select cast(t as tinyint) from decimal_2 order by t +PREHOOK: type: QUERY +POSTHOOK: query: explain +select cast(t as tinyint) from decimal_2 order by t 
+POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: decimal_2 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: UDFToByte(t) (type: tinyint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: tinyint) + sort order: + + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + Execution mode: vectorized + Reducer 2 + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: tinyint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select cast(t as tinyint) from decimal_2 order by t +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_2 +#### A masked pattern was here #### +POSTHOOK: query: select cast(t as tinyint) from decimal_2 order by t +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_2 +#### A masked pattern was here #### +13 +PREHOOK: query: explain +select cast(t as smallint) from decimal_2 order by t +PREHOOK: type: QUERY +POSTHOOK: query: explain +select cast(t as smallint) from decimal_2 order by t +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: decimal_2 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: UDFToShort(t) (type: smallint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: smallint) + sort order: + + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + Execution mode: vectorized + Reducer 2 + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: smallint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select cast(t as smallint) from decimal_2 order by t +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_2 +#### A masked pattern was here #### +POSTHOOK: query: select cast(t as smallint) from decimal_2 order by t +POSTHOOK: 
type: QUERY +POSTHOOK: Input: default@decimal_2 +#### A masked pattern was here #### +-3827 +PREHOOK: query: explain +select cast(t as int) from decimal_2 order by t +PREHOOK: type: QUERY +POSTHOOK: query: explain +select cast(t as int) from decimal_2 order by t +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: decimal_2 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: UDFToInteger(t) (type: int) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: int) + sort order: + + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + Execution mode: vectorized + Reducer 2 + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: int) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select cast(t as int) from decimal_2 order by t +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_2 +#### A masked pattern was here #### +POSTHOOK: query: select cast(t as int) from decimal_2 order by t +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_2 +#### A masked pattern was here #### +3404045 +PREHOOK: query: explain +select cast(t as bigint) from decimal_2 order by t +PREHOOK: type: QUERY +POSTHOOK: query: explain +select cast(t as bigint) from decimal_2 order by t +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: decimal_2 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: UDFToLong(t) (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: bigint) + sort order: + + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + Execution mode: vectorized + Reducer 2 + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: 
query: select cast(t as bigint) from decimal_2 order by t +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_2 +#### A masked pattern was here #### +POSTHOOK: query: select cast(t as bigint) from decimal_2 order by t +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_2 +#### A masked pattern was here #### +3404045 +PREHOOK: query: explain +select cast(t as float) from decimal_2 order by t +PREHOOK: type: QUERY +POSTHOOK: query: explain +select cast(t as float) from decimal_2 order by t +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: decimal_2 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: UDFToFloat(t) (type: float) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: float) + sort order: + + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + Execution mode: vectorized + Reducer 2 + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: float) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select cast(t as float) from decimal_2 order by t +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_2 +#### A masked pattern was here #### +POSTHOOK: query: select cast(t as float) from decimal_2 order by t +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_2 +#### A masked pattern was here #### +3404045.5 +PREHOOK: query: explain +select cast(t as double) from decimal_2 order by t +PREHOOK: type: QUERY +POSTHOOK: query: explain +select cast(t as double) from decimal_2 order by t +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: decimal_2 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: UDFToDouble(t) (type: double) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: double) + sort order: + + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + Execution mode: vectorized + Reducer 2 + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: double) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + 
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select cast(t as double) from decimal_2 order by t +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_2 +#### A masked pattern was here #### +POSTHOOK: query: select cast(t as double) from decimal_2 order by t +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_2 +#### A masked pattern was here #### +3404045.5044003 +PREHOOK: query: explain +select cast(t as string) from decimal_2 order by t +PREHOOK: type: QUERY +POSTHOOK: query: explain +select cast(t as string) from decimal_2 order by t +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: decimal_2 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: UDFToString(t) (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: string) + sort order: + + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + Execution mode: vectorized + Reducer 2 + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select cast(t as string) from decimal_2 order by t +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_2 +#### A masked pattern was here #### +POSTHOOK: query: select cast(t as string) from decimal_2 order by t +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_2 +#### A masked pattern was here #### +3404045.5044003 +PREHOOK: query: explain +select cast(3.14 as decimal(4,2)) as c from decimal_2 order by c +PREHOOK: type: QUERY +POSTHOOK: query: explain +select cast(3.14 as decimal(4,2)) as c from decimal_2 order by c +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: decimal_2 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: CAST( 3.14 AS decimal(4,2)) (type: decimal(4,2)) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key expressions: _col0 (type: decimal(4,2)) + sort order: + + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE + Execution mode: vectorized + Reducer 2 + Reduce Operator Tree: + Select Operator + expressions: 
KEY.reducesinkkey0 (type: decimal(4,2)) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select cast(3.14 as decimal(4,2)) as c from decimal_2 order by c +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_2 +#### A masked pattern was here #### +POSTHOOK: query: select cast(3.14 as decimal(4,2)) as c from decimal_2 order by c +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_2 +#### A masked pattern was here #### +3.14 +PREHOOK: query: explain +select cast(cast(3.14 as float) as decimal(4,2)) as c from decimal_2 order by c +PREHOOK: type: QUERY +POSTHOOK: query: explain +select cast(cast(3.14 as float) as decimal(4,2)) as c from decimal_2 order by c +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: decimal_2 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: CAST( 3.14 AS decimal(4,2)) (type: decimal(4,2)) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key expressions: _col0 (type: decimal(4,2)) + sort order: + + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE + Execution mode: vectorized + Reducer 2 + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: decimal(4,2)) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select cast(cast(3.14 as float) as decimal(4,2)) as c from decimal_2 order by c +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_2 +#### A masked pattern was here #### +POSTHOOK: query: select cast(cast(3.14 as float) as decimal(4,2)) as c from decimal_2 order by c +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_2 +#### A masked pattern was here #### +3.14 +PREHOOK: query: explain +select cast(cast('2012-12-19 11:12:19.1234567' as timestamp) as decimal(30,8)) as c from decimal_2 order by c +PREHOOK: type: QUERY +POSTHOOK: query: explain +select cast(cast('2012-12-19 11:12:19.1234567' as timestamp) as decimal(30,8)) as c from decimal_2 order by c +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + 
Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: decimal_2 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: CAST( 2012-12-19 11:12:19.1234567 AS decimal(30,8)) (type: decimal(30,8)) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key expressions: _col0 (type: decimal(30,8)) + sort order: + + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE + Reducer 2 + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: decimal(30,8)) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select cast(cast('2012-12-19 11:12:19.1234567' as timestamp) as decimal(30,8)) as c from decimal_2 order by c +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_2 +#### A masked pattern was here #### +POSTHOOK: query: select cast(cast('2012-12-19 11:12:19.1234567' as timestamp) as decimal(30,8)) as c from decimal_2 order by c +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_2 +#### A masked pattern was here #### +1355944339.1234567 +PREHOOK: query: explain +select cast(true as decimal) as c from decimal_2 order by c +PREHOOK: type: QUERY +POSTHOOK: query: explain +select cast(true as decimal) as c from decimal_2 order by c +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: decimal_2 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: CAST( true AS decimal(10,0)) (type: decimal(10,0)) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key expressions: _col0 (type: decimal(10,0)) + sort order: + + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE + Reducer 2 + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: decimal(10,0)) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: explain +select cast(true as decimal) as c from decimal_2 order by c +PREHOOK: type: QUERY +POSTHOOK: query: explain +select cast(true as decimal) as c from decimal_2 order by c +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage 
+ Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: decimal_2 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: CAST( true AS decimal(10,0)) (type: decimal(10,0)) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key expressions: _col0 (type: decimal(10,0)) + sort order: + + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE + Reducer 2 + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: decimal(10,0)) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select cast(true as decimal) as c from decimal_2 order by c +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_2 +#### A masked pattern was here #### +POSTHOOK: query: select cast(true as decimal) as c from decimal_2 order by c +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_2 +#### A masked pattern was here #### +1 +PREHOOK: query: explain +select cast(3Y as decimal) as c from decimal_2 order by c +PREHOOK: type: QUERY +POSTHOOK: query: explain +select cast(3Y as decimal) as c from decimal_2 order by c +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: decimal_2 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: CAST( 3 AS decimal(10,0)) (type: decimal(10,0)) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key expressions: _col0 (type: decimal(10,0)) + sort order: + + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE + Execution mode: vectorized + Reducer 2 + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: decimal(10,0)) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select cast(3Y as decimal) as c from decimal_2 order by c +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_2 +#### A masked pattern was here #### +POSTHOOK: query: select cast(3Y as decimal) as c from 
decimal_2 order by c +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_2 +#### A masked pattern was here #### +3 +PREHOOK: query: explain +select cast(3S as decimal) as c from decimal_2 order by c +PREHOOK: type: QUERY +POSTHOOK: query: explain +select cast(3S as decimal) as c from decimal_2 order by c +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: decimal_2 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: CAST( 3 AS decimal(10,0)) (type: decimal(10,0)) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key expressions: _col0 (type: decimal(10,0)) + sort order: + + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE + Execution mode: vectorized + Reducer 2 + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: decimal(10,0)) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select cast(3S as decimal) as c from decimal_2 order by c +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_2 +#### A masked pattern was here #### +POSTHOOK: query: select cast(3S as decimal) as c from decimal_2 order by c +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_2 +#### A masked pattern was here #### +3 +PREHOOK: query: explain +select cast(cast(3 as int) as decimal) as c from decimal_2 order by c +PREHOOK: type: QUERY +POSTHOOK: query: explain +select cast(cast(3 as int) as decimal) as c from decimal_2 order by c +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: decimal_2 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: CAST( 3 AS decimal(10,0)) (type: decimal(10,0)) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key expressions: _col0 (type: decimal(10,0)) + sort order: + + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE + Execution mode: vectorized + Reducer 2 + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: decimal(10,0)) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select cast(cast(3 as int) as decimal) as c from decimal_2 order by c +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_2 +#### A masked pattern was here #### +POSTHOOK: query: select cast(cast(3 as int) as decimal) as c from decimal_2 order by c +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_2 +#### A masked pattern was here #### +3 +PREHOOK: query: explain +select cast(3L as decimal) as c from decimal_2 order by c +PREHOOK: type: QUERY +POSTHOOK: query: explain +select cast(3L as decimal) as c from decimal_2 order by c +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: decimal_2 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: CAST( 3 AS decimal(10,0)) (type: decimal(10,0)) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key expressions: _col0 (type: decimal(10,0)) + sort order: + + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE + Execution mode: vectorized + Reducer 2 + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: decimal(10,0)) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select cast(3L as decimal) as c from decimal_2 order by c +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_2 +#### A masked pattern was here #### +POSTHOOK: query: select cast(3L as decimal) as c from decimal_2 order by c +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_2 +#### A masked pattern was here #### +3 +PREHOOK: query: explain +select cast(0.99999999999999999999 as decimal(20,19)) as c from decimal_2 order by c +PREHOOK: type: QUERY +POSTHOOK: query: explain +select cast(0.99999999999999999999 as decimal(20,19)) as c from decimal_2 order by c +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: decimal_2 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: CAST( 1.0 AS decimal(20,19)) (type: decimal(20,19)) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key expressions: _col0 (type: decimal(20,19)) + sort order: + + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE 
Column stats: COMPLETE + Execution mode: vectorized + Reducer 2 + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: decimal(20,19)) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select cast(0.99999999999999999999 as decimal(20,19)) as c from decimal_2 order by c +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_2 +#### A masked pattern was here #### +POSTHOOK: query: select cast(0.99999999999999999999 as decimal(20,19)) as c from decimal_2 order by c +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_2 +#### A masked pattern was here #### +1.0 +PREHOOK: query: explain +select cast('0.99999999999999999999' as decimal(20,20)) as c from decimal_2 order by c +PREHOOK: type: QUERY +POSTHOOK: query: explain +select cast('0.99999999999999999999' as decimal(20,20)) as c from decimal_2 order by c +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: decimal_2 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: CAST( '0.99999999999999999999' AS decimal(20,20)) (type: decimal(20,20)) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key expressions: _col0 (type: decimal(20,20)) + sort order: + + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE + Reducer 2 + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: decimal(20,20)) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select cast('0.99999999999999999999' as decimal(20,20)) as c from decimal_2 order by c +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_2 +#### A masked pattern was here #### +POSTHOOK: query: select cast('0.99999999999999999999' as decimal(20,20)) as c from decimal_2 order by c +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_2 +#### A masked pattern was here #### +0.99999999999999999999 +PREHOOK: query: drop table decimal_2 +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@decimal_2 +PREHOOK: Output: default@decimal_2 +POSTHOOK: query: drop table decimal_2 +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@decimal_2 +POSTHOOK: Output: default@decimal_2 Index: 
ql/src/test/results/clientpositive/tez/vector_decimal_3.q.out =================================================================== --- ql/src/test/results/clientpositive/tez/vector_decimal_3.q.out (revision 0) +++ ql/src/test/results/clientpositive/tez/vector_decimal_3.q.out (working copy) @@ -0,0 +1,390 @@ +PREHOOK: query: DROP TABLE IF EXISTS DECIMAL_3_txt +PREHOOK: type: DROPTABLE +POSTHOOK: query: DROP TABLE IF EXISTS DECIMAL_3_txt +POSTHOOK: type: DROPTABLE +PREHOOK: query: DROP TABLE IF EXISTS DECIMAL_3 +PREHOOK: type: DROPTABLE +POSTHOOK: query: DROP TABLE IF EXISTS DECIMAL_3 +POSTHOOK: type: DROPTABLE +PREHOOK: query: CREATE TABLE DECIMAL_3_txt(key decimal(38,18), value int) +ROW FORMAT DELIMITED + FIELDS TERMINATED BY ' ' +STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@DECIMAL_3_txt +POSTHOOK: query: CREATE TABLE DECIMAL_3_txt(key decimal(38,18), value int) +ROW FORMAT DELIMITED + FIELDS TERMINATED BY ' ' +STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@DECIMAL_3_txt +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv7.txt' INTO TABLE DECIMAL_3_txt +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@decimal_3_txt +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv7.txt' INTO TABLE DECIMAL_3_txt +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@decimal_3_txt +PREHOOK: query: CREATE TABLE DECIMAL_3 STORED AS ORC AS SELECT * FROM DECIMAL_3_txt +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: default@decimal_3_txt +PREHOOK: Output: database:default +PREHOOK: Output: default@DECIMAL_3 +POSTHOOK: query: CREATE TABLE DECIMAL_3 STORED AS ORC AS SELECT * FROM DECIMAL_3_txt +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: default@decimal_3_txt +POSTHOOK: Output: database:default +POSTHOOK: Output: default@DECIMAL_3 +PREHOOK: query: SELECT * FROM DECIMAL_3 ORDER BY key, value +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_3 +#### A masked pattern was here #### +POSTHOOK: query: SELECT * FROM DECIMAL_3 ORDER BY key, value +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_3 +#### A masked pattern was here #### +NULL 0 +-1234567890.1234567890 -1234567890 +-4400 4400 +-1255.49 -1255 +-1.122 -11 +-1.12 -1 +-1.12 -1 +-0.333 0 +-0.33 0 +-0.3 0 +0.000000000000000000 0 +0 0 +0 0 +0.01 0 +0.02 0 +0.1 0 +0.2 0 +0.3 0 +0.33 0 +0.333 0 +1 1 +1.0 1 +1.000000000000000000 1 +1.12 1 +1.122 1 +2 2 +2 2 +3.14 3 +3.14 3 +3.14 3 +3.140 4 +10 10 +20 20 +100 100 +124.00 124 +125.2 125 +200 200 +1234567890.1234567800 1234567890 +PREHOOK: query: SELECT * FROM DECIMAL_3 ORDER BY key DESC, value DESC +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_3 +#### A masked pattern was here #### +POSTHOOK: query: SELECT * FROM DECIMAL_3 ORDER BY key DESC, value DESC +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_3 +#### A masked pattern was here #### +1234567890.1234567800 1234567890 +200 200 +125.2 125 +124.00 124 +100 100 +20 20 +10 10 +3.140 4 +3.14 3 +3.14 3 +3.14 3 +2 2 +2 2 +1.122 1 +1.12 1 +1.000000000000000000 1 +1.0 1 +1 1 +0.333 0 +0.33 0 +0.3 0 +0.2 0 +0.1 0 +0.02 0 +0.01 0 +0 0 +0 0 +0.000000000000000000 0 +-0.3 0 +-0.33 0 +-0.333 0 +-1.12 -1 +-1.12 -1 +-1.122 -11 +-1255.49 -1255 +-4400 4400 +-1234567890.1234567890 -1234567890 +NULL 0 +PREHOOK: query: SELECT * FROM DECIMAL_3 ORDER BY key, value +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_3 +#### A masked 
pattern was here #### +POSTHOOK: query: SELECT * FROM DECIMAL_3 ORDER BY key, value +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_3 +#### A masked pattern was here #### +NULL 0 +-1234567890.1234567890 -1234567890 +-4400 4400 +-1255.49 -1255 +-1.122 -11 +-1.12 -1 +-1.12 -1 +-0.333 0 +-0.33 0 +-0.3 0 +0.000000000000000000 0 +0 0 +0 0 +0.01 0 +0.02 0 +0.1 0 +0.2 0 +0.3 0 +0.33 0 +0.333 0 +1 1 +1.0 1 +1.000000000000000000 1 +1.12 1 +1.122 1 +2 2 +2 2 +3.14 3 +3.14 3 +3.14 3 +3.140 4 +10 10 +20 20 +100 100 +124.00 124 +125.2 125 +200 200 +1234567890.1234567800 1234567890 +PREHOOK: query: SELECT DISTINCT key FROM DECIMAL_3 ORDER BY key +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_3 +#### A masked pattern was here #### +POSTHOOK: query: SELECT DISTINCT key FROM DECIMAL_3 ORDER BY key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_3 +#### A masked pattern was here #### +NULL +-1234567890.1234567890 +-4400 +-1255.49 +-1.122 +-1.12 +-0.333 +-0.33 +-0.3 +0.000000000000000000 +0.01 +0.02 +0.1 +0.2 +0.3 +0.33 +0.333 +1 +1.12 +1.122 +2 +3.14 +10 +20 +100 +124.00 +125.2 +200 +1234567890.1234567800 +PREHOOK: query: SELECT key, sum(value) FROM DECIMAL_3 GROUP BY key ORDER BY key +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_3 +#### A masked pattern was here #### +POSTHOOK: query: SELECT key, sum(value) FROM DECIMAL_3 GROUP BY key ORDER BY key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_3 +#### A masked pattern was here #### +NULL 0 +-1234567890.1234567890 -1234567890 +-4400 4400 +-1255.49 -1255 +-1.122 -11 +-1.12 -2 +-0.333 0 +-0.33 0 +-0.3 0 +0.000000000000000000 0 +0.01 0 +0.02 0 +0.1 0 +0.2 0 +0.3 0 +0.33 0 +0.333 0 +1 3 +1.12 1 +1.122 1 +2 4 +3.14 13 +10 10 +20 20 +100 100 +124.00 124 +125.2 125 +200 200 +1234567890.1234567800 1234567890 +PREHOOK: query: SELECT value, sum(key) FROM DECIMAL_3 GROUP BY value ORDER BY value +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_3 +#### A masked pattern was here #### +POSTHOOK: query: SELECT value, sum(key) FROM DECIMAL_3 GROUP BY value ORDER BY value +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_3 +#### A masked pattern was here #### +-1234567890 -1234567890.1234567890 +-1255 -1255.49 +-11 -1.122 +-1 -2.24 +0 0.330000000000000000 +1 5.242000000000000000 +2 4 +3 9.42 +4 3.140 +10 10 +20 20 +100 100 +124 124.00 +125 125.2 +200 200 +4400 -4400 +1234567890 1234567890.1234567800 +PREHOOK: query: SELECT * FROM DECIMAL_3 a JOIN DECIMAL_3 b ON (a.key = b.key) ORDER BY a.key, a.value, b.value +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_3 +#### A masked pattern was here #### +POSTHOOK: query: SELECT * FROM DECIMAL_3 a JOIN DECIMAL_3 b ON (a.key = b.key) ORDER BY a.key, a.value, b.value +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_3 +#### A masked pattern was here #### +-1234567890.1234567890 -1234567890 -1234567890.1234567890 -1234567890 +-4400 4400 -4400 4400 +-1255.49 -1255 -1255.49 -1255 +-1.122 -11 -1.122 -11 +-1.12 -1 -1.12 -1 +-1.12 -1 -1.12 -1 +-1.12 -1 -1.12 -1 +-1.12 -1 -1.12 -1 +-0.333 0 -0.333 0 +-0.33 0 -0.33 0 +-0.3 0 -0.3 0 +0.000000000000000000 0 0.000000000000000000 0 +0.000000000000000000 0 0 0 +0.000000000000000000 0 0 0 +0 0 0 0 +0 0 0.000000000000000000 0 +0 0 0 0 +0 0 0.000000000000000000 0 +0 0 0 0 +0 0 0 0 +0.01 0 0.01 0 +0.02 0 0.02 0 +0.1 0 0.1 0 +0.2 0 0.2 0 +0.3 0 0.3 0 +0.33 0 0.33 0 +0.333 0 0.333 0 +1 1 1 1 +1 1 1.0 1 +1 1 1.000000000000000000 1 +1.0 1 1.000000000000000000 1 +1.0 1 1.0 1 +1.0 1 1 1 +1.000000000000000000 1 1.000000000000000000 1 
+1.000000000000000000 1 1 1 +1.000000000000000000 1 1.0 1 +1.12 1 1.12 1 +1.122 1 1.122 1 +2 2 2 2 +2 2 2 2 +2 2 2 2 +2 2 2 2 +3.14 3 3.14 3 +3.14 3 3.14 3 +3.14 3 3.14 3 +3.14 3 3.14 3 +3.14 3 3.14 3 +3.14 3 3.14 3 +3.14 3 3.14 3 +3.14 3 3.14 3 +3.14 3 3.14 3 +3.14 3 3.140 4 +3.14 3 3.140 4 +3.14 3 3.140 4 +3.140 4 3.14 3 +3.140 4 3.14 3 +3.140 4 3.14 3 +3.140 4 3.140 4 +10 10 10 10 +20 20 20 20 +100 100 100 100 +124.00 124 124.00 124 +125.2 125 125.2 125 +200 200 200 200 +1234567890.1234567800 1234567890 1234567890.1234567800 1234567890 +PREHOOK: query: SELECT * FROM DECIMAL_3 WHERE key=3.14 ORDER BY key, value +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_3 +#### A masked pattern was here #### +POSTHOOK: query: SELECT * FROM DECIMAL_3 WHERE key=3.14 ORDER BY key, value +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_3 +#### A masked pattern was here #### +3.14 3 +3.14 3 +3.14 3 +3.140 4 +PREHOOK: query: SELECT * FROM DECIMAL_3 WHERE key=3.140 ORDER BY key, value +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_3 +#### A masked pattern was here #### +POSTHOOK: query: SELECT * FROM DECIMAL_3 WHERE key=3.140 ORDER BY key, value +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_3 +#### A masked pattern was here #### +3.14 3 +3.14 3 +3.14 3 +3.140 4 +PREHOOK: query: DROP TABLE DECIMAL_3_txt +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@decimal_3_txt +PREHOOK: Output: default@decimal_3_txt +POSTHOOK: query: DROP TABLE DECIMAL_3_txt +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@decimal_3_txt +POSTHOOK: Output: default@decimal_3_txt +PREHOOK: query: DROP TABLE DECIMAL_3 +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@decimal_3 +PREHOOK: Output: default@decimal_3 +POSTHOOK: query: DROP TABLE DECIMAL_3 +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@decimal_3 +POSTHOOK: Output: default@decimal_3 Index: ql/src/test/results/clientpositive/tez/vector_decimal_4.q.out =================================================================== --- ql/src/test/results/clientpositive/tez/vector_decimal_4.q.out (revision 0) +++ ql/src/test/results/clientpositive/tez/vector_decimal_4.q.out (working copy) @@ -0,0 +1,250 @@ +PREHOOK: query: DROP TABLE IF EXISTS DECIMAL_4_1 +PREHOOK: type: DROPTABLE +POSTHOOK: query: DROP TABLE IF EXISTS DECIMAL_4_1 +POSTHOOK: type: DROPTABLE +PREHOOK: query: DROP TABLE IF EXISTS DECIMAL_4_2 +PREHOOK: type: DROPTABLE +POSTHOOK: query: DROP TABLE IF EXISTS DECIMAL_4_2 +POSTHOOK: type: DROPTABLE +PREHOOK: query: CREATE TABLE DECIMAL_4_1(key decimal(35,25), value int) +ROW FORMAT DELIMITED + FIELDS TERMINATED BY ' ' +STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@DECIMAL_4_1 +POSTHOOK: query: CREATE TABLE DECIMAL_4_1(key decimal(35,25), value int) +ROW FORMAT DELIMITED + FIELDS TERMINATED BY ' ' +STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@DECIMAL_4_1 +PREHOOK: query: CREATE TABLE DECIMAL_4_2(key decimal(35,25), value decimal(35,25)) +STORED AS ORC +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@DECIMAL_4_2 +POSTHOOK: query: CREATE TABLE DECIMAL_4_2(key decimal(35,25), value decimal(35,25)) +STORED AS ORC +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@DECIMAL_4_2 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv7.txt' INTO TABLE DECIMAL_4_1 +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: 
default@decimal_4_1 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv7.txt' INTO TABLE DECIMAL_4_1 +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@decimal_4_1 +PREHOOK: query: INSERT OVERWRITE TABLE DECIMAL_4_2 SELECT key, key * 3 FROM DECIMAL_4_1 +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_4_1 +PREHOOK: Output: default@decimal_4_2 +POSTHOOK: query: INSERT OVERWRITE TABLE DECIMAL_4_2 SELECT key, key * 3 FROM DECIMAL_4_1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_4_1 +POSTHOOK: Output: default@decimal_4_2 +POSTHOOK: Lineage: decimal_4_2.key SIMPLE [(decimal_4_1)decimal_4_1.FieldSchema(name:key, type:decimal(35,25), comment:null), ] +POSTHOOK: Lineage: decimal_4_2.value EXPRESSION [(decimal_4_1)decimal_4_1.FieldSchema(name:key, type:decimal(35,25), comment:null), ] +PREHOOK: query: SELECT * FROM DECIMAL_4_1 ORDER BY key, value +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_4_1 +#### A masked pattern was here #### +POSTHOOK: query: SELECT * FROM DECIMAL_4_1 ORDER BY key, value +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_4_1 +#### A masked pattern was here #### +NULL 0 +-1234567890.1234567890 -1234567890 +-4400 4400 +-1255.49 -1255 +-1.122 -11 +-1.12 -1 +-1.12 -1 +-0.333 0 +-0.33 0 +-0.3 0 +0.0000000000000000000000000 0 +0 0 +0 0 +0.01 0 +0.02 0 +0.1 0 +0.2 0 +0.3 0 +0.33 0 +0.333 0 +0.9999999999999999999999999 1 +1 1 +1.0 1 +1.12 1 +1.122 1 +2 2 +2 2 +3.14 3 +3.14 3 +3.14 3 +3.140 4 +10 10 +20 20 +100 100 +124.00 124 +125.2 125 +200 200 +1234567890.1234567800 1234567890 +PREHOOK: query: SELECT * FROM DECIMAL_4_2 ORDER BY key, value +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_4_2 +#### A masked pattern was here #### +POSTHOOK: query: SELECT * FROM DECIMAL_4_2 ORDER BY key, value +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_4_2 +#### A masked pattern was here #### +NULL NULL +-1234567890.1234567890 -3703703670.3703703670 +-4400 -13200 +-1255.49 -3766.47 +-1.122 -3.366 +-1.12 -3.36 +-1.12 -3.36 +-0.333 -0.999 +-0.33 -0.99 +-0.3 -0.9 +0.0000000000000000000000000 0.0000000000000000000000000 +0 0 +0 0 +0.01 0.03 +0.02 0.06 +0.1 0.3 +0.2 0.6 +0.3 0.9 +0.33 0.99 +0.333 0.999 +0.9999999999999999999999999 2.9999999999999999999999997 +1 3 +1.0 3.0 +1.12 3.36 +1.122 3.366 +2 6 +2 6 +3.14 9.42 +3.14 9.42 +3.14 9.42 +3.140 9.420 +10 30 +20 60 +100 300 +124.00 372.00 +125.2 375.6 +200 600 +1234567890.1234567800 3703703670.3703703400 +PREHOOK: query: SELECT * FROM DECIMAL_4_2 ORDER BY key +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_4_2 +#### A masked pattern was here #### +POSTHOOK: query: SELECT * FROM DECIMAL_4_2 ORDER BY key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_4_2 +#### A masked pattern was here #### +NULL NULL +-1234567890.1234567890 -3703703670.3703703670 +-4400 -13200 +-1255.49 -3766.47 +-1.122 -3.366 +-1.12 -3.36 +-1.12 -3.36 +-0.333 -0.999 +-0.33 -0.99 +-0.3 -0.9 +0.0000000000000000000000000 0.0000000000000000000000000 +0 0 +0 0 +0.01 0.03 +0.02 0.06 +0.1 0.3 +0.2 0.6 +0.3 0.9 +0.33 0.99 +0.333 0.999 +0.9999999999999999999999999 2.9999999999999999999999997 +1 3 +1.0 3.0 +1.12 3.36 +1.122 3.366 +2 6 +2 6 +3.14 9.42 +3.14 9.42 +3.14 9.42 +3.140 9.420 +10 30 +20 60 +100 300 +124.00 372.00 +125.2 375.6 +200 600 +1234567890.1234567800 3703703670.3703703400 +PREHOOK: query: SELECT * FROM DECIMAL_4_2 ORDER BY key, value +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_4_2 +#### A masked pattern was here #### +POSTHOOK: query: SELECT * FROM DECIMAL_4_2 
ORDER BY key, value +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_4_2 +#### A masked pattern was here #### +NULL NULL +-1234567890.1234567890 -3703703670.3703703670 +-4400 -13200 +-1255.49 -3766.47 +-1.122 -3.366 +-1.12 -3.36 +-1.12 -3.36 +-0.333 -0.999 +-0.33 -0.99 +-0.3 -0.9 +0.0000000000000000000000000 0.0000000000000000000000000 +0 0 +0 0 +0.01 0.03 +0.02 0.06 +0.1 0.3 +0.2 0.6 +0.3 0.9 +0.33 0.99 +0.333 0.999 +0.9999999999999999999999999 2.9999999999999999999999997 +1 3 +1.0 3.0 +1.12 3.36 +1.122 3.366 +2 6 +2 6 +3.14 9.42 +3.14 9.42 +3.14 9.42 +3.140 9.420 +10 30 +20 60 +100 300 +124.00 372.00 +125.2 375.6 +200 600 +1234567890.1234567800 3703703670.3703703400 +PREHOOK: query: DROP TABLE DECIMAL_4_1 +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@decimal_4_1 +PREHOOK: Output: default@decimal_4_1 +POSTHOOK: query: DROP TABLE DECIMAL_4_1 +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@decimal_4_1 +POSTHOOK: Output: default@decimal_4_1 +PREHOOK: query: DROP TABLE DECIMAL_4_2 +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@decimal_4_2 +PREHOOK: Output: default@decimal_4_2 +POSTHOOK: query: DROP TABLE DECIMAL_4_2 +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@decimal_4_2 +POSTHOOK: Output: default@decimal_4_2 Index: ql/src/test/results/clientpositive/tez/vector_decimal_5.q.out =================================================================== --- ql/src/test/results/clientpositive/tez/vector_decimal_5.q.out (revision 0) +++ ql/src/test/results/clientpositive/tez/vector_decimal_5.q.out (working copy) @@ -0,0 +1,239 @@ +PREHOOK: query: DROP TABLE IF EXISTS DECIMAL_5_txt +PREHOOK: type: DROPTABLE +POSTHOOK: query: DROP TABLE IF EXISTS DECIMAL_5_txt +POSTHOOK: type: DROPTABLE +PREHOOK: query: DROP TABLE IF EXISTS DECIMAL_5 +PREHOOK: type: DROPTABLE +POSTHOOK: query: DROP TABLE IF EXISTS DECIMAL_5 +POSTHOOK: type: DROPTABLE +PREHOOK: query: CREATE TABLE DECIMAL_5_txt(key decimal(10,5), value int) +ROW FORMAT DELIMITED + FIELDS TERMINATED BY ' ' +STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@DECIMAL_5_txt +POSTHOOK: query: CREATE TABLE DECIMAL_5_txt(key decimal(10,5), value int) +ROW FORMAT DELIMITED + FIELDS TERMINATED BY ' ' +STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@DECIMAL_5_txt +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv7.txt' INTO TABLE DECIMAL_5_txt +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@decimal_5_txt +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv7.txt' INTO TABLE DECIMAL_5_txt +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@decimal_5_txt +PREHOOK: query: CREATE TABLE DECIMAL_5(key decimal(10,5), value int) +STORED AS ORC +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@DECIMAL_5 +POSTHOOK: query: CREATE TABLE DECIMAL_5(key decimal(10,5), value int) +STORED AS ORC +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@DECIMAL_5 +PREHOOK: query: INSERT OVERWRITE TABLE DECIMAL_5 SELECT * FROM DECIMAL_5_txt +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_5_txt +PREHOOK: Output: default@decimal_5 +POSTHOOK: query: INSERT OVERWRITE TABLE DECIMAL_5 SELECT * FROM DECIMAL_5_txt +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_5_txt +POSTHOOK: Output: default@decimal_5 +POSTHOOK: Lineage: decimal_5.key SIMPLE 
[(decimal_5_txt)decimal_5_txt.FieldSchema(name:key, type:decimal(10,5), comment:null), ] +POSTHOOK: Lineage: decimal_5.value SIMPLE [(decimal_5_txt)decimal_5_txt.FieldSchema(name:value, type:int, comment:null), ] +PREHOOK: query: SELECT key FROM DECIMAL_5 ORDER BY key +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_5 +#### A masked pattern was here #### +POSTHOOK: query: SELECT key FROM DECIMAL_5 ORDER BY key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_5 +#### A masked pattern was here #### +NULL +NULL +NULL +-4400 +-1255.49 +-1.122 +-1.12 +-1.12 +-0.333 +-0.33 +-0.3 +0.00000 +0 +0 +0.01 +0.02 +0.1 +0.2 +0.3 +0.33 +0.333 +1 +1.0 +1.00000 +1.12 +1.122 +2 +2 +3.14 +3.14 +3.14 +3.140 +10 +20 +100 +124.00 +125.2 +200 +PREHOOK: query: SELECT DISTINCT key FROM DECIMAL_5 ORDER BY key +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_5 +#### A masked pattern was here #### +POSTHOOK: query: SELECT DISTINCT key FROM DECIMAL_5 ORDER BY key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_5 +#### A masked pattern was here #### +NULL +-4400 +-1255.49 +-1.122 +-1.12 +-0.333 +-0.33 +-0.3 +0.00000 +0.01 +0.02 +0.1 +0.2 +0.3 +0.33 +0.333 +1 +1.12 +1.122 +2 +3.14 +10 +20 +100 +124.00 +125.2 +200 +PREHOOK: query: SELECT cast(key as decimal) FROM DECIMAL_5 +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_5 +#### A masked pattern was here #### +POSTHOOK: query: SELECT cast(key as decimal) FROM DECIMAL_5 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_5 +#### A masked pattern was here #### +-4400 +NULL +0 +0 +100 +10 +1 +0 +0 +200 +20 +2 +0 +0 +0 +0 +0 +0 +0 +0 +0 +1 +2 +3 +-1 +-1 +-1 +1 +1 +124 +125 +-1255 +3 +3 +3 +1 +NULL +NULL +PREHOOK: query: SELECT cast(key as decimal(6,3)) FROM DECIMAL_5 +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_5 +#### A masked pattern was here #### +POSTHOOK: query: SELECT cast(key as decimal(6,3)) FROM DECIMAL_5 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_5 +#### A masked pattern was here #### +NULL +NULL +0.000 +0 +100 +10 +1 +0.1 +0.01 +200 +20 +2 +0 +0.2 +0.02 +0.3 +0.33 +0.333 +-0.3 +-0.33 +-0.333 +1.0 +2 +3.14 +-1.12 +-1.12 +-1.122 +1.12 +1.122 +124.00 +125.2 +NULL +3.14 +3.14 +3.140 +1.000 +NULL +NULL +PREHOOK: query: DROP TABLE DECIMAL_5_txt +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@decimal_5_txt +PREHOOK: Output: default@decimal_5_txt +POSTHOOK: query: DROP TABLE DECIMAL_5_txt +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@decimal_5_txt +POSTHOOK: Output: default@decimal_5_txt +PREHOOK: query: DROP TABLE DECIMAL_5 +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@decimal_5 +PREHOOK: Output: default@decimal_5 +POSTHOOK: query: DROP TABLE DECIMAL_5 +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@decimal_5 +POSTHOOK: Output: default@decimal_5 Index: ql/src/test/results/clientpositive/tez/vector_decimal_6.q.out =================================================================== --- ql/src/test/results/clientpositive/tez/vector_decimal_6.q.out (revision 0) +++ ql/src/test/results/clientpositive/tez/vector_decimal_6.q.out (working copy) @@ -0,0 +1,303 @@ +PREHOOK: query: DROP TABLE IF EXISTS DECIMAL_6_1_txt +PREHOOK: type: DROPTABLE +POSTHOOK: query: DROP TABLE IF EXISTS DECIMAL_6_1_txt +POSTHOOK: type: DROPTABLE +PREHOOK: query: DROP TABLE IF EXISTS DECIMAL_6_1 +PREHOOK: type: DROPTABLE +POSTHOOK: query: DROP TABLE IF EXISTS DECIMAL_6_1 +POSTHOOK: type: DROPTABLE +PREHOOK: query: DROP TABLE IF EXISTS DECIMAL_6_2_txt +PREHOOK: type: DROPTABLE +POSTHOOK: query: DROP TABLE IF EXISTS 
DECIMAL_6_2_txt +POSTHOOK: type: DROPTABLE +PREHOOK: query: DROP TABLE IF EXISTS DECIMAL_6_2 +PREHOOK: type: DROPTABLE +POSTHOOK: query: DROP TABLE IF EXISTS DECIMAL_6_2 +POSTHOOK: type: DROPTABLE +PREHOOK: query: DROP TABLE IF EXISTS DECIMAL_6_3_txt +PREHOOK: type: DROPTABLE +POSTHOOK: query: DROP TABLE IF EXISTS DECIMAL_6_3_txt +POSTHOOK: type: DROPTABLE +PREHOOK: query: DROP TABLE IF EXISTS DECIMAL_6_3 +PREHOOK: type: DROPTABLE +POSTHOOK: query: DROP TABLE IF EXISTS DECIMAL_6_3 +POSTHOOK: type: DROPTABLE +PREHOOK: query: CREATE TABLE DECIMAL_6_1_txt(key decimal(10,5), value int) +ROW FORMAT DELIMITED + FIELDS TERMINATED BY ' ' +STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@DECIMAL_6_1_txt +POSTHOOK: query: CREATE TABLE DECIMAL_6_1_txt(key decimal(10,5), value int) +ROW FORMAT DELIMITED + FIELDS TERMINATED BY ' ' +STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@DECIMAL_6_1_txt +PREHOOK: query: CREATE TABLE DECIMAL_6_2_txt(key decimal(17,4), value int) +ROW FORMAT DELIMITED + FIELDS TERMINATED BY ' ' +STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@DECIMAL_6_2_txt +POSTHOOK: query: CREATE TABLE DECIMAL_6_2_txt(key decimal(17,4), value int) +ROW FORMAT DELIMITED + FIELDS TERMINATED BY ' ' +STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@DECIMAL_6_2_txt +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv9.txt' INTO TABLE DECIMAL_6_1_txt +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@decimal_6_1_txt +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv9.txt' INTO TABLE DECIMAL_6_1_txt +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@decimal_6_1_txt +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv9.txt' INTO TABLE DECIMAL_6_2_txt +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@decimal_6_2_txt +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv9.txt' INTO TABLE DECIMAL_6_2_txt +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@decimal_6_2_txt +PREHOOK: query: CREATE TABLE DECIMAL_6_1(key decimal(10,5), value int) +STORED AS ORC +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@DECIMAL_6_1 +POSTHOOK: query: CREATE TABLE DECIMAL_6_1(key decimal(10,5), value int) +STORED AS ORC +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@DECIMAL_6_1 +PREHOOK: query: CREATE TABLE DECIMAL_6_2(key decimal(17,4), value int) +STORED AS ORC +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@DECIMAL_6_2 +POSTHOOK: query: CREATE TABLE DECIMAL_6_2(key decimal(17,4), value int) +STORED AS ORC +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@DECIMAL_6_2 +PREHOOK: query: INSERT OVERWRITE TABLE DECIMAL_6_1 SELECT * FROM DECIMAL_6_1_txt +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_6_1_txt +PREHOOK: Output: default@decimal_6_1 +POSTHOOK: query: INSERT OVERWRITE TABLE DECIMAL_6_1 SELECT * FROM DECIMAL_6_1_txt +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_6_1_txt +POSTHOOK: Output: default@decimal_6_1 +POSTHOOK: Lineage: decimal_6_1.key SIMPLE [(decimal_6_1_txt)decimal_6_1_txt.FieldSchema(name:key, type:decimal(10,5), 
comment:null), ] +POSTHOOK: Lineage: decimal_6_1.value SIMPLE [(decimal_6_1_txt)decimal_6_1_txt.FieldSchema(name:value, type:int, comment:null), ] +PREHOOK: query: INSERT OVERWRITE TABLE DECIMAL_6_2 SELECT * FROM DECIMAL_6_2_txt +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_6_2_txt +PREHOOK: Output: default@decimal_6_2 +POSTHOOK: query: INSERT OVERWRITE TABLE DECIMAL_6_2 SELECT * FROM DECIMAL_6_2_txt +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_6_2_txt +POSTHOOK: Output: default@decimal_6_2 +POSTHOOK: Lineage: decimal_6_2.key SIMPLE [(decimal_6_2_txt)decimal_6_2_txt.FieldSchema(name:key, type:decimal(17,4), comment:null), ] +POSTHOOK: Lineage: decimal_6_2.value SIMPLE [(decimal_6_2_txt)decimal_6_2_txt.FieldSchema(name:value, type:int, comment:null), ] +PREHOOK: query: SELECT * FROM DECIMAL_6_1 ORDER BY key, value +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_6_1 +#### A masked pattern was here #### +POSTHOOK: query: SELECT * FROM DECIMAL_6_1 ORDER BY key, value +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_6_1 +#### A masked pattern was here #### +NULL -1234567890 +NULL 0 +NULL 3 +NULL 4 +NULL 1234567890 +-4400 4400 +-1255.49 -1255 +-1.122 -11 +-1.12 -1 +-0.333 0 +-0.3 0 +0.00000 0 +0 0 +0.333 0 +1.0 1 +1.00000 1 +1.12 1 +1.122 1 +2 2 +3.14 3 +3.14 3 +3.140 4 +10 10 +10.73433 5 +124.00 124 +125.2 125 +23232.23435 2 +PREHOOK: query: SELECT * FROM DECIMAL_6_2 ORDER BY key, value +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_6_2 +#### A masked pattern was here #### +POSTHOOK: query: SELECT * FROM DECIMAL_6_2 ORDER BY key, value +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_6_2 +#### A masked pattern was here #### +NULL 0 +-1234567890.1235 -1234567890 +-4400 4400 +-1255.49 -1255 +-1.122 -11 +-1.12 -1 +-0.333 0 +-0.3 0 +0.0000 0 +0 0 +0.333 0 +1.0 1 +1.0000 1 +1.12 1 +1.122 1 +2 2 +3.14 3 +3.14 3 +3.140 4 +10 10 +10.7343 5 +124.00 124 +125.2 125 +23232.2344 2 +2389432.2375 3 +2389432.2375 4 +1234567890.1235 1234567890 +PREHOOK: query: SELECT T.key from ( + SELECT key, value from DECIMAL_6_1 + UNION ALL + SELECT key, value from DECIMAL_6_2 +) T order by T.key +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_6_1 +PREHOOK: Input: default@decimal_6_2 +#### A masked pattern was here #### +POSTHOOK: query: SELECT T.key from ( + SELECT key, value from DECIMAL_6_1 + UNION ALL + SELECT key, value from DECIMAL_6_2 +) T order by T.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_6_1 +POSTHOOK: Input: default@decimal_6_2 +#### A masked pattern was here #### +NULL +NULL +NULL +NULL +NULL +NULL +-1234567890.1235 +-4400 +-4400 +-1255.49 +-1255.49 +-1.122 +-1.122 +-1.12 +-1.12 +-0.333 +-0.333 +-0.3 +-0.3 +0.00000 +0.0000 +0 +0 +0.333 +0.333 +1.0 +1.0 +1.0000 +1.00000 +1.12 +1.12 +1.122 +1.122 +2 +2 +3.14 +3.14 +3.14 +3.14 +3.140 +3.140 +10 +10 +10.7343 +10.73433 +124.00 +124.00 +125.2 +125.2 +23232.23435 +23232.2344 +2389432.2375 +2389432.2375 +1234567890.1235 +PREHOOK: query: CREATE TABLE DECIMAL_6_3 STORED AS ORC AS SELECT key + 5.5 AS k, value * 11 AS v from DECIMAL_6_1 ORDER BY v +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: default@decimal_6_1 +PREHOOK: Output: database:default +PREHOOK: Output: default@DECIMAL_6_3 +POSTHOOK: query: CREATE TABLE DECIMAL_6_3 STORED AS ORC AS SELECT key + 5.5 AS k, value * 11 AS v from DECIMAL_6_1 ORDER BY v +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: default@decimal_6_1 +POSTHOOK: Output: database:default +POSTHOOK: Output: default@DECIMAL_6_3 +PREHOOK: query: desc 
DECIMAL_6_3 +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@decimal_6_3 +POSTHOOK: query: desc DECIMAL_6_3 +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@decimal_6_3 +k double +v int +PREHOOK: query: SELECT * FROM DECIMAL_6_3 ORDER BY k, v +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_6_3 +#### A masked pattern was here #### +POSTHOOK: query: SELECT * FROM DECIMAL_6_3 ORDER BY k, v +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_6_3 +#### A masked pattern was here #### +NULL -695344902 +NULL 0 +NULL 33 +NULL 44 +NULL 695344902 +-4394.5 48400 +-1249.99 -13805 +4.378 -121 +4.38 -11 +5.167 0 +5.2 0 +5.5 0 +5.5 0 +5.833 0 +6.5 11 +6.5 11 +6.62 11 +6.622 11 +7.5 22 +8.64 33 +8.64 33 +8.64 44 +15.5 110 +16.23433 55 +129.5 1364 +130.7 1375 +23237.73435 22 Index: ql/src/test/results/clientpositive/tez/vector_decimal_aggregate.q.out =================================================================== --- ql/src/test/results/clientpositive/tez/vector_decimal_aggregate.q.out (revision 1637277) +++ ql/src/test/results/clientpositive/tez/vector_decimal_aggregate.q.out (working copy) @@ -113,14 +113,14 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@decimal_vgby #### A masked pattern was here #### -NULL 3072 9318.4351351351 -4298.1513513514 5018444.1081079808 3072 11160.71538461538500 -5147.90769230769300 6010604.30769230735360 --3728 6 5831542.2692483780 -3367.6517567568 5817556.0411483778 6 6984454.21109769200000 -4033.445769230769 6967702.86724384584710 --563 2 -515.6210729730 -3367.6517567568 -3883.2728297298 2 -617.56077692307690 -4033.445769230769 -4651.00654615384590 -762 2 5831542.2692483780 1531.2194054054 5833073.4886537834 2 6984454.21109769200000 1833.9456923076925 6986288.15678999969250 -6981 3 5831542.269248378 -515.6210729730 5830511.0271024320 3 6984454.211097692 -617.56077692307690 6983219.08954384584620 -253665376 1024 9767.0054054054 -9779.5486486487 -347484.0818378374 1024 11697.96923076923100 -11712.99230769231000 -416182.64030769233089 -528534767 1024 5831542.2692483780 -9777.1594594595 11646372.8607481068 1024 6984454.21109769200000 -11710.13076923077100 13948892.79980307629003 -626923679 1024 9723.4027027027 -9778.9513513514 10541.0525297287 1024 11645.74615384615400 -11712.27692307692300 12625.04759999997746 +NULL 3072 9318.4351351351 -4298.1513513514 5018444.1081079808 3072 11160.715384615385 -5147.907692307693 6010604.3076923073536 +-3728 6 5831542.269248378 -3367.6517567568 5817556.0411483778 6 6984454.211097692 -4033.445769230769 6967702.8672438458471 +-563 2 -515.6210729730 -3367.6517567568 -3883.2728297298 2 -617.5607769230769 -4033.445769230769 -4651.0065461538459 +762 2 5831542.269248378 1531.2194054054 5833073.4886537834 2 6984454.211097692 1833.9456923076925 6986288.1567899996925 +6981 3 5831542.269248378 -515.6210729730 5830511.0271024320 3 6984454.211097692 -617.5607769230769 6983219.0895438458462 +253665376 1024 9767.0054054054 -9779.5486486487 -347484.0818378374 1024 11697.969230769231 -11712.99230769231 -416182.64030769233089 +528534767 1024 5831542.269248378 -9777.1594594595 11646372.8607481068 1024 6984454.211097692 -11710.130769230771 13948892.79980307629003 +626923679 1024 9723.4027027027 -9778.9513513514 10541.0525297287 1024 11645.746153846154 -11712.276923076923 12625.04759999997746 PREHOOK: query: -- Now add the others... 
EXPLAIN SELECT cint, COUNT(cdecimal1), MAX(cdecimal1), MIN(cdecimal1), SUM(cdecimal1), AVG(cdecimal1), STDDEV_POP(cdecimal1), STDDEV_SAMP(cdecimal1), @@ -217,11 +217,11 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@decimal_vgby #### A masked pattern was here #### -NULL 3072 9318.4351351351 -4298.1513513514 5018444.1081079808 1633.60810810806667 5695.483082135364 5696.4103077145055 3072 11160.71538461538500 -5147.90769230769300 6010604.30769230735360 1956.576923076922966667 6821.495748565159 6822.606289190924 --3728 6 5831542.2692483780 -3367.6517567568 5817556.0411483778 969592.67352472963333 2174330.2092403853 2381859.406131774 6 6984454.21109769200000 -4033.445769230769 6967702.86724384584710 1161283.811207307641183333 2604201.2704476737 2852759.5602156054 --563 2 -515.6210729730 -3367.6517567568 -3883.2728297298 -1941.6364148649 1426.0153418918999 2016.6902366556308 2 -617.56077692307690 -4033.445769230769 -4651.00654615384590 -2325.50327307692295 1707.9424961538462 2415.395441814127 -762 2 5831542.2692483780 1531.2194054054 5833073.4886537834 2916536.7443268917 2915005.5249214866 4122440.3477364695 2 6984454.21109769200000 1833.9456923076925 6986288.15678999969250 3493144.07839499984625 3491310.1327026924 4937458.140118758 -6981 3 5831542.269248378 -515.6210729730 5830511.0271024320 1943503.67570081066667 2749258.455012492 3367140.1929065133 3 6984454.211097692 -617.56077692307690 6983219.08954384584620 2327739.696514615282066667 3292794.4113115156 4032833.0678006653 -253665376 1024 9767.0054054054 -9779.5486486487 -347484.0818378374 -339.33992366976309 5708.9563478862 5711.745967572779 1024 11697.96923076923100 -11712.99230769231000 -416182.64030769233089 -406.428359675480791885 6837.632716002934 6840.973851172274 -528534767 1024 5831542.2692483780 -9777.1594594595 11646372.8607481068 11373.41099682432305 257528.92988206653 257654.7686043977 1024 6984454.21109769200000 -11710.13076923077100 13948892.79980307629003 13621.965624807691689482 308443.1074570801 308593.82484083984 -626923679 1024 9723.4027027027 -9778.9513513514 10541.0525297287 10.29399661106318 5742.09145323734 5744.897264034267 1024 11645.74615384615400 -11712.27692307692300 12625.04759999997746 12.329148046874977988 6877.318722794877 6880.679250101603 +NULL 3072 9318.4351351351 -4298.1513513514 5018444.1081079808 1633.60810810806667 5695.483082135364 5696.4103077145055 3072 11160.715384615385 -5147.907692307693 6010604.3076923073536 1956.576923076922966667 6821.495748565159 6822.606289190924 +-3728 6 5831542.269248378 -3367.6517567568 5817556.0411483778 969592.67352472963333 2174330.2092403853 2381859.406131774 6 6984454.211097692 -4033.445769230769 6967702.8672438458471 1161283.811207307641183333 2604201.2704476737 2852759.5602156054 +-563 2 -515.6210729730 -3367.6517567568 -3883.2728297298 -1941.6364148649 1426.0153418918999 2016.6902366556308 2 -617.5607769230769 -4033.445769230769 -4651.0065461538459 -2325.50327307692295 1707.9424961538462 2415.395441814127 +762 2 5831542.269248378 1531.2194054054 5833073.4886537834 2916536.7443268917 2915005.5249214866 4122440.3477364695 2 6984454.211097692 1833.9456923076925 6986288.1567899996925 3493144.07839499984625 3491310.1327026924 4937458.140118758 +6981 3 5831542.269248378 -515.6210729730 5830511.0271024320 1943503.67570081066667 2749258.455012492 3367140.1929065133 3 6984454.211097692 -617.5607769230769 6983219.0895438458462 2327739.696514615282066667 3292794.4113115156 4032833.0678006653 +253665376 1024 9767.0054054054 -9779.5486486487 -347484.0818378374 
-339.33992366976309 5708.9563478862 5711.745967572779 1024 11697.969230769231 -11712.99230769231 -416182.64030769233089 -406.428359675480791885 6837.632716002934 6840.973851172274 +528534767 1024 5831542.269248378 -9777.1594594595 11646372.8607481068 11373.41099682432305 257528.92988206653 257654.7686043977 1024 6984454.211097692 -11710.130769230771 13948892.79980307629003 13621.965624807691689482 308443.1074570801 308593.82484083984 +626923679 1024 9723.4027027027 -9778.9513513514 10541.0525297287 10.29399661106318 5742.09145323734 5744.897264034267 1024 11645.746153846154 -11712.276923076923 12625.04759999997746 12.329148046874977988 6877.318722794877 6880.679250101603 Index: ql/src/test/results/clientpositive/tez/vector_decimal_cast.q.out =================================================================== --- ql/src/test/results/clientpositive/tez/vector_decimal_cast.q.out (revision 0) +++ ql/src/test/results/clientpositive/tez/vector_decimal_cast.q.out (working copy) @@ -0,0 +1,41 @@ +PREHOOK: query: EXPLAIN SELECT cdouble, cint, cboolean1, ctimestamp1, CAST(cdouble AS DECIMAL(20,10)), CAST(cint AS DECIMAL(23,14)), CAST(cboolean1 AS DECIMAL(5,2)), CAST(ctimestamp1 AS DECIMAL(15,0)) FROM alltypesorc WHERE cdouble IS NOT NULL AND cint IS NOT NULL AND cboolean1 IS NOT NULL AND ctimestamp1 IS NOT NULL LIMIT 10 +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN SELECT cdouble, cint, cboolean1, ctimestamp1, CAST(cdouble AS DECIMAL(20,10)), CAST(cint AS DECIMAL(23,14)), CAST(cboolean1 AS DECIMAL(5,2)), CAST(ctimestamp1 AS DECIMAL(15,0)) FROM alltypesorc WHERE cdouble IS NOT NULL AND cint IS NOT NULL AND cboolean1 IS NOT NULL AND ctimestamp1 IS NOT NULL LIMIT 10 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: 10 + Processor Tree: + TableScan + alias: alltypesorc + Filter Operator + predicate: (((cdouble is not null and cint is not null) and cboolean1 is not null) and ctimestamp1 is not null) (type: boolean) + Select Operator + expressions: cdouble (type: double), cint (type: int), cboolean1 (type: boolean), ctimestamp1 (type: timestamp), CAST( cdouble AS decimal(20,10)) (type: decimal(20,10)), CAST( cint AS decimal(23,14)) (type: decimal(23,14)), CAST( cboolean1 AS decimal(5,2)) (type: decimal(5,2)), CAST( ctimestamp1 AS decimal(15,0)) (type: decimal(15,0)) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7 + Limit + Number of rows: 10 + ListSink + +PREHOOK: query: SELECT cdouble, cint, cboolean1, ctimestamp1, CAST(cdouble AS DECIMAL(20,10)), CAST(cint AS DECIMAL(23,14)), CAST(cboolean1 AS DECIMAL(5,2)), CAST(ctimestamp1 AS DECIMAL(15,0)) FROM alltypesorc WHERE cdouble IS NOT NULL AND cint IS NOT NULL AND cboolean1 IS NOT NULL AND ctimestamp1 IS NOT NULL LIMIT 10 +PREHOOK: type: QUERY +PREHOOK: Input: default@alltypesorc +#### A masked pattern was here #### +POSTHOOK: query: SELECT cdouble, cint, cboolean1, ctimestamp1, CAST(cdouble AS DECIMAL(20,10)), CAST(cint AS DECIMAL(23,14)), CAST(cboolean1 AS DECIMAL(5,2)), CAST(ctimestamp1 AS DECIMAL(15,0)) FROM alltypesorc WHERE cdouble IS NOT NULL AND cint IS NOT NULL AND cboolean1 IS NOT NULL AND ctimestamp1 IS NOT NULL LIMIT 10 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@alltypesorc +#### A masked pattern was here #### +-13326.0 528534767 true 1969-12-31 15:59:46.674 -13326.0 528534767 1 -13 +-15813.0 528534767 true 1969-12-31 15:59:55.787 -15813.0 528534767 1 -4 +-9566.0 528534767 true 1969-12-31 15:59:44.187 -9566.0 528534767 1 -16 +15007.0 
528534767 true 1969-12-31 15:59:50.434 15007.0 528534767 1 -10 +7021.0 528534767 true 1969-12-31 16:00:15.007 7021.0 528534767 1 15 +4963.0 528534767 true 1969-12-31 16:00:07.021 4963.0 528534767 1 7 +-7824.0 528534767 true 1969-12-31 16:00:04.963 -7824.0 528534767 1 5 +-15431.0 528534767 true 1969-12-31 15:59:52.176 -15431.0 528534767 1 -8 +-15549.0 528534767 true 1969-12-31 15:59:44.569 -15549.0 528534767 1 -15 +5780.0 528534767 true 1969-12-31 15:59:44.451 5780.0 528534767 1 -16 Index: ql/src/test/results/clientpositive/tez/vector_decimal_expressions.q.out =================================================================== --- ql/src/test/results/clientpositive/tez/vector_decimal_expressions.q.out (revision 0) +++ ql/src/test/results/clientpositive/tez/vector_decimal_expressions.q.out (working copy) @@ -0,0 +1,51 @@ +PREHOOK: query: CREATE TABLE decimal_test STORED AS ORC AS SELECT cdouble, CAST (((cdouble*22.1)/37) AS DECIMAL(20,10)) AS cdecimal1, CAST (((cdouble*9.3)/13) AS DECIMAL(23,14)) AS cdecimal2 FROM alltypesorc +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: default@alltypesorc +PREHOOK: Output: database:default +PREHOOK: Output: default@decimal_test +POSTHOOK: query: CREATE TABLE decimal_test STORED AS ORC AS SELECT cdouble, CAST (((cdouble*22.1)/37) AS DECIMAL(20,10)) AS cdecimal1, CAST (((cdouble*9.3)/13) AS DECIMAL(23,14)) AS cdecimal2 FROM alltypesorc +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: default@alltypesorc +POSTHOOK: Output: database:default +POSTHOOK: Output: default@decimal_test +PREHOOK: query: EXPLAIN SELECT cdecimal1 + cdecimal2, cdecimal1 - (2*cdecimal2), ((cdecimal1+2.34)/cdecimal2), (cdecimal1 * (cdecimal2/3.4)), cdecimal1 % 10, CAST(cdecimal1 AS INT), CAST(cdecimal2 AS SMALLINT), CAST(cdecimal2 AS TINYINT), CAST(cdecimal1 AS BIGINT), CAST (cdecimal1 AS BOOLEAN), CAST(cdecimal2 AS DOUBLE), CAST(cdecimal1 AS FLOAT), CAST(cdecimal2 AS STRING), CAST(cdecimal1 AS TIMESTAMP) FROM decimal_test WHERE cdecimal1 > 0 AND cdecimal1 < 12345.5678 AND cdecimal2 != 0 AND cdecimal2 > 1000 AND cdouble IS NOT NULL LIMIT 10 +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN SELECT cdecimal1 + cdecimal2, cdecimal1 - (2*cdecimal2), ((cdecimal1+2.34)/cdecimal2), (cdecimal1 * (cdecimal2/3.4)), cdecimal1 % 10, CAST(cdecimal1 AS INT), CAST(cdecimal2 AS SMALLINT), CAST(cdecimal2 AS TINYINT), CAST(cdecimal1 AS BIGINT), CAST (cdecimal1 AS BOOLEAN), CAST(cdecimal2 AS DOUBLE), CAST(cdecimal1 AS FLOAT), CAST(cdecimal2 AS STRING), CAST(cdecimal1 AS TIMESTAMP) FROM decimal_test WHERE cdecimal1 > 0 AND cdecimal1 < 12345.5678 AND cdecimal2 != 0 AND cdecimal2 > 1000 AND cdouble IS NOT NULL LIMIT 10 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: 10 + Processor Tree: + TableScan + alias: decimal_test + Filter Operator + predicate: (((((cdecimal1 > 0) and (cdecimal1 < 12345.5678)) and (cdecimal2 <> 0)) and (cdecimal2 > 1000)) and cdouble is not null) (type: boolean) + Select Operator + expressions: (cdecimal1 + cdecimal2) (type: decimal(25,14)), (cdecimal1 - (2 * cdecimal2)) (type: decimal(26,14)), ((cdecimal1 + 2.34) / cdecimal2) (type: double), (cdecimal1 * (cdecimal2 / 3.4)) (type: double), (cdecimal1 % 10) (type: decimal(12,10)), UDFToInteger(cdecimal1) (type: int), UDFToShort(cdecimal2) (type: smallint), UDFToByte(cdecimal2) (type: tinyint), UDFToLong(cdecimal1) (type: bigint), UDFToBoolean(cdecimal1) (type: boolean), UDFToDouble(cdecimal2) (type: double), UDFToFloat(cdecimal1) (type: float), 
UDFToString(cdecimal2) (type: string), CAST( cdecimal1 AS TIMESTAMP) (type: timestamp) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13 + Limit + Number of rows: 10 + ListSink + +PREHOOK: query: SELECT cdecimal1 + cdecimal2, cdecimal1 - (2*cdecimal2), ((cdecimal1+2.34)/cdecimal2), (cdecimal1 * (cdecimal2/3.4)), cdecimal1 % 10, CAST(cdecimal1 AS INT), CAST(cdecimal2 AS SMALLINT), CAST(cdecimal2 AS TINYINT), CAST(cdecimal1 AS BIGINT), CAST (cdecimal1 AS BOOLEAN), CAST(cdecimal2 AS DOUBLE), CAST(cdecimal1 AS FLOAT), CAST(cdecimal2 AS STRING), CAST(cdecimal1 AS TIMESTAMP) FROM decimal_test WHERE cdecimal1 > 0 AND cdecimal1 < 12345.5678 AND cdecimal2 != 0 AND cdecimal2 > 1000 AND cdouble IS NOT NULL LIMIT 10 +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_test +#### A masked pattern was here #### +POSTHOOK: query: SELECT cdecimal1 + cdecimal2, cdecimal1 - (2*cdecimal2), ((cdecimal1+2.34)/cdecimal2), (cdecimal1 * (cdecimal2/3.4)), cdecimal1 % 10, CAST(cdecimal1 AS INT), CAST(cdecimal2 AS SMALLINT), CAST(cdecimal2 AS TINYINT), CAST(cdecimal1 AS BIGINT), CAST (cdecimal1 AS BOOLEAN), CAST(cdecimal2 AS DOUBLE), CAST(cdecimal1 AS FLOAT), CAST(cdecimal2 AS STRING), CAST(cdecimal1 AS TIMESTAMP) FROM decimal_test WHERE cdecimal1 > 0 AND cdecimal1 < 12345.5678 AND cdecimal2 != 0 AND cdecimal2 > 1000 AND cdouble IS NOT NULL LIMIT 10 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_test +#### A masked pattern was here #### +19699.417463617423 -12507.913305613346 0.8351496686995997 2.8303425077026896E7 3.6405405405 8963 10735 -17 8963 true 10735.776923076923 8963.641 10735.776923076923 1969-12-31 18:29:23.64054054 +9216.339708939685 -5851.806444906470 0.8353975893550668 6195112.1797296945 3.6243243243 4193 5022 -98 4193 true 5022.715384615385 4193.6245 5022.715384615385 1969-12-31 17:09:53.624324324 +6514.8403326403464 -4136.5212058211928 0.8355907765708067 3095563.9418919063 4.3864864865 2964 3550 -34 2964 true 3550.4538461538464 2964.3865 3550.4538461538464 1969-12-31 16:49:24.386486486 +7587.301455301477 -4817.467775467754 0.8354976172734904 4198623.24324327 2.3783783784 3452 4134 38 3452 true 4134.923076923077 3452.3784 4134.923076923077 1969-12-31 16:57:32.378378378 +19197.9729729730 -12189.5270270270 0.835155361813429 2.6880848817567654E7 5.4729729730 8735 10462 -34 8735 true 10462.5 8735.473 10462.5 1969-12-31 18:25:35.472972973 +17098.9945945946 -10856.8054054054 0.8351828165813104 2.132423090270272E7 0.3945945946 7780 9318 102 7780 true 9318.6 7780.3945 9318.6 1969-12-31 18:09:40.394594594 +12433.723076923077 -7894.646153846154 0.8352770361086894 1.12754688E7 7.6 5657 6776 120 5657 true 6776.123076923077 5657.6 6776.123076923077 1969-12-31 17:34:17.6 +7247.316839916862 -4601.598544698524 0.8355241651897876 3830775.6932432684 7.6783783784 3297 3949 109 3297 true 3949.638461538462 3297.6785 3949.638461538462 1969-12-31 16:54:57.678378378 +14757.1700623700465 -9369.8914760914930 0.8352226654922171 1.5883214124324286E7 4.8162162162 6714 8042 106 6714 true 8042.3538461538465 6714.8164 8042.3538461538465 1969-12-31 17:51:54.816216216 +10964.832016631993 -6961.991060291086 0.8353232978714221 8768719.779729689 9.2243243243 4989 5975 87 4989 true 5975.607692307693 4989.224 5975.607692307693 1969-12-31 17:23:09.224324324 Index: ql/src/test/results/clientpositive/tez/vector_decimal_mapjoin.q.out =================================================================== --- 
ql/src/test/results/clientpositive/tez/vector_decimal_mapjoin.q.out (revision 0) +++ ql/src/test/results/clientpositive/tez/vector_decimal_mapjoin.q.out (working copy) @@ -0,0 +1,211 @@ +PREHOOK: query: -- SORT_QUERY_RESULTS + +CREATE TABLE decimal_mapjoin STORED AS ORC AS + SELECT cdouble, CAST (((cdouble*22.1)/37) AS DECIMAL(20,10)) AS cdecimal1, + CAST (((cdouble*9.3)/13) AS DECIMAL(23,14)) AS cdecimal2, + cint + FROM alltypesorc +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: default@alltypesorc +PREHOOK: Output: database:default +PREHOOK: Output: default@decimal_mapjoin +POSTHOOK: query: -- SORT_QUERY_RESULTS + +CREATE TABLE decimal_mapjoin STORED AS ORC AS + SELECT cdouble, CAST (((cdouble*22.1)/37) AS DECIMAL(20,10)) AS cdecimal1, + CAST (((cdouble*9.3)/13) AS DECIMAL(23,14)) AS cdecimal2, + cint + FROM alltypesorc +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: default@alltypesorc +POSTHOOK: Output: database:default +POSTHOOK: Output: default@decimal_mapjoin +PREHOOK: query: EXPLAIN SELECT l.cint, r.cint, l.cdecimal1, r.cdecimal2 + FROM decimal_mapjoin l + JOIN decimal_mapjoin r ON l.cint = r.cint + WHERE l.cint = 6981 +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN SELECT l.cint, r.cint, l.cdecimal1, r.cdecimal2 + FROM decimal_mapjoin l + JOIN decimal_mapjoin r ON l.cint = r.cint + WHERE l.cint = 6981 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Map 2 <- Map 1 (BROADCAST_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: r + Statistics: Num rows: 12288 Data size: 2165060 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (cint = 6981) (type: boolean) + Statistics: Num rows: 6144 Data size: 1082530 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: 6981 (type: int) + sort order: + + Statistics: Num rows: 6144 Data size: 1082530 Basic stats: COMPLETE Column stats: NONE + value expressions: cdecimal2 (type: decimal(23,14)) + Execution mode: vectorized + Map 2 + Map Operator Tree: + TableScan + alias: l + Statistics: Num rows: 12288 Data size: 2165060 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (cint = 6981) (type: boolean) + Statistics: Num rows: 6144 Data size: 1082530 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 {cdecimal1} + 1 {cdecimal2} + keys: + 0 6981 (type: int) + 1 6981 (type: int) + outputColumnNames: _col1, _col9 + input vertices: + 1 Map 1 + Statistics: Num rows: 6758 Data size: 1190783 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: 6981 (type: int), 6981 (type: int), _col1 (type: decimal(20,10)), _col9 (type: decimal(23,14)) + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 6758 Data size: 1190783 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 6758 Data size: 1190783 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT l.cint, r.cint, l.cdecimal1, r.cdecimal2 + FROM decimal_mapjoin l + JOIN decimal_mapjoin 
r ON l.cint = r.cint + WHERE l.cint = 6981 +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_mapjoin +#### A masked pattern was here #### +POSTHOOK: query: SELECT l.cint, r.cint, l.cdecimal1, r.cdecimal2 + FROM decimal_mapjoin l + JOIN decimal_mapjoin r ON l.cint = r.cint + WHERE l.cint = 6981 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_mapjoin +#### A masked pattern was here #### +6981 6981 -515.6210729730 -617.5607769230769 +6981 6981 -515.6210729730 -617.5607769230769 +6981 6981 -515.6210729730 -617.5607769230769 +6981 6981 -515.6210729730 -617.5607769230769 +6981 6981 -515.6210729730 6984454.211097692 +6981 6981 -515.6210729730 6984454.211097692 +6981 6981 -515.6210729730 NULL +6981 6981 -515.6210729730 NULL +6981 6981 -515.6210729730 NULL +6981 6981 -515.6210729730 NULL +6981 6981 -515.6210729730 NULL +6981 6981 -515.6210729730 NULL +6981 6981 -515.6210729730 NULL +6981 6981 -515.6210729730 NULL +6981 6981 -515.6210729730 NULL +6981 6981 -515.6210729730 NULL +6981 6981 -515.6210729730 NULL +6981 6981 -515.6210729730 NULL +6981 6981 -515.6210729730 NULL +6981 6981 -515.6210729730 NULL +6981 6981 5831542.269248378 -617.5607769230769 +6981 6981 5831542.269248378 -617.5607769230769 +6981 6981 5831542.269248378 6984454.211097692 +6981 6981 5831542.269248378 NULL +6981 6981 5831542.269248378 NULL +6981 6981 5831542.269248378 NULL +6981 6981 5831542.269248378 NULL +6981 6981 5831542.269248378 NULL +6981 6981 5831542.269248378 NULL +6981 6981 5831542.269248378 NULL +6981 6981 NULL -617.5607769230769 +6981 6981 NULL -617.5607769230769 +6981 6981 NULL -617.5607769230769 +6981 6981 NULL -617.5607769230769 +6981 6981 NULL -617.5607769230769 +6981 6981 NULL -617.5607769230769 +6981 6981 NULL -617.5607769230769 +6981 6981 NULL -617.5607769230769 +6981 6981 NULL -617.5607769230769 +6981 6981 NULL -617.5607769230769 +6981 6981 NULL -617.5607769230769 +6981 6981 NULL -617.5607769230769 +6981 6981 NULL -617.5607769230769 +6981 6981 NULL -617.5607769230769 +6981 6981 NULL 6984454.211097692 +6981 6981 NULL 6984454.211097692 +6981 6981 NULL 6984454.211097692 +6981 6981 NULL 6984454.211097692 +6981 6981 NULL 6984454.211097692 +6981 6981 NULL 6984454.211097692 +6981 6981 NULL 6984454.211097692 +6981 6981 NULL NULL +6981 6981 NULL NULL +6981 6981 NULL NULL +6981 6981 NULL NULL +6981 6981 NULL NULL +6981 6981 NULL NULL +6981 6981 NULL NULL +6981 6981 NULL NULL +6981 6981 NULL NULL +6981 6981 NULL NULL +6981 6981 NULL NULL +6981 6981 NULL NULL +6981 6981 NULL NULL +6981 6981 NULL NULL +6981 6981 NULL NULL +6981 6981 NULL NULL +6981 6981 NULL NULL +6981 6981 NULL NULL +6981 6981 NULL NULL +6981 6981 NULL NULL +6981 6981 NULL NULL +6981 6981 NULL NULL +6981 6981 NULL NULL +6981 6981 NULL NULL +6981 6981 NULL NULL +6981 6981 NULL NULL +6981 6981 NULL NULL +6981 6981 NULL NULL +6981 6981 NULL NULL +6981 6981 NULL NULL +6981 6981 NULL NULL +6981 6981 NULL NULL +6981 6981 NULL NULL +6981 6981 NULL NULL +6981 6981 NULL NULL +6981 6981 NULL NULL +6981 6981 NULL NULL +6981 6981 NULL NULL +6981 6981 NULL NULL +6981 6981 NULL NULL +6981 6981 NULL NULL +6981 6981 NULL NULL +6981 6981 NULL NULL +6981 6981 NULL NULL +6981 6981 NULL NULL +6981 6981 NULL NULL +6981 6981 NULL NULL +6981 6981 NULL NULL +6981 6981 NULL NULL Index: ql/src/test/results/clientpositive/tez/vector_decimal_math_funcs.q.out =================================================================== --- ql/src/test/results/clientpositive/tez/vector_decimal_math_funcs.q.out (revision 0) +++ 
ql/src/test/results/clientpositive/tez/vector_decimal_math_funcs.q.out (working copy) @@ -0,0 +1,192 @@ +PREHOOK: query: CREATE TABLE decimal_test STORED AS ORC AS SELECT cbigint, cdouble, CAST (((cdouble*22.1)/37) AS DECIMAL(20,10)) AS cdecimal1, CAST (((cdouble*9.3)/13) AS DECIMAL(23,14)) AS cdecimal2 FROM alltypesorc +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: default@alltypesorc +PREHOOK: Output: database:default +PREHOOK: Output: default@decimal_test +POSTHOOK: query: CREATE TABLE decimal_test STORED AS ORC AS SELECT cbigint, cdouble, CAST (((cdouble*22.1)/37) AS DECIMAL(20,10)) AS cdecimal1, CAST (((cdouble*9.3)/13) AS DECIMAL(23,14)) AS cdecimal2 FROM alltypesorc +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: default@alltypesorc +POSTHOOK: Output: database:default +POSTHOOK: Output: default@decimal_test +PREHOOK: query: -- Test math functions in vectorized mode to verify they run correctly end-to-end. + +explain +select + cdecimal1 + ,Round(cdecimal1, 2) + ,Round(cdecimal1) + ,Floor(cdecimal1) + ,Ceil(cdecimal1) + ,round(Exp(cdecimal1), 58) + ,Ln(cdecimal1) + ,Log10(cdecimal1) + -- Use log2 as a representative function to test all input types. + ,Log2(cdecimal1) + -- Use 15601.0 to test zero handling, as there are no zeroes in the table + ,Log2(cdecimal1 - 15601.0) + ,Log(2.0, cdecimal1) + ,Pow(log2(cdecimal1), 2.0) + ,Power(log2(cdecimal1), 2.0) + ,Sqrt(cdecimal1) + ,Abs(cdecimal1) + ,Sin(cdecimal1) + ,Asin(cdecimal1) + ,Cos(cdecimal1) + ,ACos(cdecimal1) + ,Atan(cdecimal1) + ,Degrees(cdecimal1) + ,Radians(cdecimal1) + ,Positive(cdecimal1) + ,Negative(cdecimal1) + ,Sign(cdecimal1) + -- Test nesting + ,cos(-sin(log(cdecimal1)) + 3.14159) +from decimal_test +-- limit output to a reasonably small number of rows +where cbigint % 500 = 0 +-- test use of a math function in the WHERE clause +and sin(cdecimal1) >= -1.0 +PREHOOK: type: QUERY +POSTHOOK: query: -- Test math functions in vectorized mode to verify they run correctly end-to-end. + +explain +select + cdecimal1 + ,Round(cdecimal1, 2) + ,Round(cdecimal1) + ,Floor(cdecimal1) + ,Ceil(cdecimal1) + ,round(Exp(cdecimal1), 58) + ,Ln(cdecimal1) + ,Log10(cdecimal1) + -- Use log2 as a representative function to test all input types. 
+ ,Log2(cdecimal1) + -- Use 15601.0 to test zero handling, as there are no zeroes in the table + ,Log2(cdecimal1 - 15601.0) + ,Log(2.0, cdecimal1) + ,Pow(log2(cdecimal1), 2.0) + ,Power(log2(cdecimal1), 2.0) + ,Sqrt(cdecimal1) + ,Abs(cdecimal1) + ,Sin(cdecimal1) + ,Asin(cdecimal1) + ,Cos(cdecimal1) + ,ACos(cdecimal1) + ,Atan(cdecimal1) + ,Degrees(cdecimal1) + ,Radians(cdecimal1) + ,Positive(cdecimal1) + ,Negative(cdecimal1) + ,Sign(cdecimal1) + -- Test nesting + ,cos(-sin(log(cdecimal1)) + 3.14159) +from decimal_test +-- limit output to a reasonably small number of rows +where cbigint % 500 = 0 +-- test use of a math function in the WHERE clause +and sin(cdecimal1) >= -1.0 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + TableScan + alias: decimal_test + Filter Operator + predicate: (((cbigint % 500) = 0) and (sin(cdecimal1) >= -1.0)) (type: boolean) + Select Operator + expressions: cdecimal1 (type: decimal(20,10)), round(cdecimal1, 2) (type: decimal(13,2)), round(cdecimal1) (type: decimal(11,0)), floor(cdecimal1) (type: decimal(11,0)), ceil(cdecimal1) (type: decimal(11,0)), round(exp(cdecimal1), 58) (type: double), ln(cdecimal1) (type: double), log10(cdecimal1) (type: double), log2(cdecimal1) (type: double), log2((cdecimal1 - 15601.0)) (type: double), log(2.0, cdecimal1) (type: double), power(log2(cdecimal1), 2.0) (type: double), power(log2(cdecimal1), 2.0) (type: double), sqrt(cdecimal1) (type: double), abs(cdecimal1) (type: decimal(38,18)), sin(cdecimal1) (type: double), asin(cdecimal1) (type: double), cos(cdecimal1) (type: double), acos(cdecimal1) (type: double), atan(cdecimal1) (type: double), degrees(cdecimal1) (type: double), radians(cdecimal1) (type: double), cdecimal1 (type: decimal(20,10)), (- cdecimal1) (type: decimal(20,10)), sign(cdecimal1) (type: int), cos(((- sin(log(cdecimal1))) + 3.14159)) (type: double) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25 + ListSink + +PREHOOK: query: select + cdecimal1 + ,Round(cdecimal1, 2) + ,Round(cdecimal1) + ,Floor(cdecimal1) + ,Ceil(cdecimal1) + ,round(Exp(cdecimal1), 58) + ,Ln(cdecimal1) + ,Log10(cdecimal1) + -- Use log2 as a representative function to test all input types. + ,Log2(cdecimal1) + -- Use 15601.0 to test zero handling, as there are no zeroes in the table + ,Log2(cdecimal1 - 15601.0) + ,Log(2.0, cdecimal1) + ,Pow(log2(cdecimal1), 2.0) + ,Power(log2(cdecimal1), 2.0) + ,Sqrt(cdecimal1) + ,Abs(cdecimal1) + ,Sin(cdecimal1) + ,Asin(cdecimal1) + ,Cos(cdecimal1) + ,ACos(cdecimal1) + ,Atan(cdecimal1) + ,Degrees(cdecimal1) + ,Radians(cdecimal1) + ,Positive(cdecimal1) + ,Negative(cdecimal1) + ,Sign(cdecimal1) + -- Test nesting + ,cos(-sin(log(cdecimal1)) + 3.14159) +from decimal_test +-- limit output to a reasonably small number of rows +where cbigint % 500 = 0 +-- test use of a math function in the WHERE clause +and sin(cdecimal1) >= -1.0 +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_test +#### A masked pattern was here #### +POSTHOOK: query: select + cdecimal1 + ,Round(cdecimal1, 2) + ,Round(cdecimal1) + ,Floor(cdecimal1) + ,Ceil(cdecimal1) + ,round(Exp(cdecimal1), 58) + ,Ln(cdecimal1) + ,Log10(cdecimal1) + -- Use log2 as a representative function to test all input types. 
+ ,Log2(cdecimal1) + -- Use 15601.0 to test zero handling, as there are no zeroes in the table + ,Log2(cdecimal1 - 15601.0) + ,Log(2.0, cdecimal1) + ,Pow(log2(cdecimal1), 2.0) + ,Power(log2(cdecimal1), 2.0) + ,Sqrt(cdecimal1) + ,Abs(cdecimal1) + ,Sin(cdecimal1) + ,Asin(cdecimal1) + ,Cos(cdecimal1) + ,ACos(cdecimal1) + ,Atan(cdecimal1) + ,Degrees(cdecimal1) + ,Radians(cdecimal1) + ,Positive(cdecimal1) + ,Negative(cdecimal1) + ,Sign(cdecimal1) + -- Test nesting + ,cos(-sin(log(cdecimal1)) + 3.14159) +from decimal_test +-- limit output to a reasonably small number of rows +where cbigint % 500 = 0 +-- test use of a math function in the WHERE clause +and sin(cdecimal1) >= -1.0 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_test +#### A masked pattern was here #### +-119.4594594595 -119.46 -119 -120 -119 1.316485E-52 NULL NULL NULL NULL NULL NULL NULL NULL 119.4594594595 -0.07885666683797002 NaN 0.9968859644388647 NaN -1.5624254815943668 -6844.522849943508 -2.0849608902209606 -119.4594594595 119.4594594595 -1 NULL +9318.4351351351 9318.44 9318 9318 9319 Infinity 9.13974998962673 3.969342986470191 13.185871984999437 NULL 13.185871984999437 173.867220004793 173.867220004793 96.53204201266593 9318.4351351351 0.4540668481851705 NaN 0.8909676185918236 NaN 1.5706890126394983 533907.0049096602 162.63737424163023 9318.4351351351 -9318.4351351351 1 -0.9607267417229353 +9318.4351351351 9318.44 9318 9318 9319 Infinity 9.13974998962673 3.969342986470191 13.185871984999437 NULL 13.185871984999437 173.867220004793 173.867220004793 96.53204201266593 9318.4351351351 0.4540668481851705 NaN 0.8909676185918236 NaN 1.5706890126394983 533907.0049096602 162.63737424163023 9318.4351351351 -9318.4351351351 1 -0.9607267417229353 +9318.4351351351 9318.44 9318 9318 9319 Infinity 9.13974998962673 3.969342986470191 13.185871984999437 NULL 13.185871984999437 173.867220004793 173.867220004793 96.53204201266593 9318.4351351351 0.4540668481851705 NaN 0.8909676185918236 NaN 1.5706890126394983 533907.0049096602 162.63737424163023 9318.4351351351 -9318.4351351351 1 -0.9607267417229353 +-4298.1513513514 -4298.15 -4298 -4299 -4298 0.0 NULL NULL NULL NULL NULL NULL NULL NULL 4298.1513513514 -0.43730633941118113 NaN 0.899312607223313 NaN -1.5705636686355597 -246265.93214088667 -75.01689283012556 -4298.1513513514 4298.1513513514 -1 NULL +-4298.1513513514 -4298.15 -4298 -4299 -4298 0.0 NULL NULL NULL NULL NULL NULL NULL NULL 4298.1513513514 -0.43730633941118113 NaN 0.899312607223313 NaN -1.5705636686355597 -246265.93214088667 -75.01689283012556 -4298.1513513514 4298.1513513514 -1 NULL +-4298.1513513514 -4298.15 -4298 -4299 -4298 0.0 NULL NULL NULL NULL NULL NULL NULL NULL 4298.1513513514 -0.43730633941118113 NaN 0.899312607223313 NaN -1.5705636686355597 -246265.93214088667 -75.01689283012556 -4298.1513513514 4298.1513513514 -1 NULL +-4298.1513513514 -4298.15 -4298 -4299 -4298 0.0 NULL NULL NULL NULL NULL NULL NULL NULL 4298.1513513514 -0.43730633941118113 NaN 0.899312607223313 NaN -1.5705636686355597 -246265.93214088667 -75.01689283012556 -4298.1513513514 4298.1513513514 -1 NULL +-4298.1513513514 -4298.15 -4298 -4299 -4298 0.0 NULL NULL NULL NULL NULL NULL NULL NULL 4298.1513513514 -0.43730633941118113 NaN 0.899312607223313 NaN -1.5705636686355597 -246265.93214088667 -75.01689283012556 -4298.1513513514 4298.1513513514 -1 NULL +-4298.1513513514 -4298.15 -4298 -4299 -4298 0.0 NULL NULL NULL NULL NULL NULL NULL NULL 4298.1513513514 -0.43730633941118113 NaN 0.899312607223313 NaN -1.5705636686355597 -246265.93214088667 
-75.01689283012556 -4298.1513513514 4298.1513513514 -1 NULL +-4298.1513513514 -4298.15 -4298 -4299 -4298 0.0 NULL NULL NULL NULL NULL NULL NULL NULL 4298.1513513514 -0.43730633941118113 NaN 0.899312607223313 NaN -1.5705636686355597 -246265.93214088667 -75.01689283012556 -4298.1513513514 4298.1513513514 -1 NULL Index: ql/src/test/results/clientpositive/tez/vector_decimal_precision.q.out =================================================================== --- ql/src/test/results/clientpositive/tez/vector_decimal_precision.q.out (revision 0) +++ ql/src/test/results/clientpositive/tez/vector_decimal_precision.q.out (working copy) @@ -0,0 +1,675 @@ +PREHOOK: query: DROP TABLE IF EXISTS DECIMAL_PRECISION_txt +PREHOOK: type: DROPTABLE +POSTHOOK: query: DROP TABLE IF EXISTS DECIMAL_PRECISION_txt +POSTHOOK: type: DROPTABLE +PREHOOK: query: DROP TABLE IF EXISTS DECIMAL_PRECISION +PREHOOK: type: DROPTABLE +POSTHOOK: query: DROP TABLE IF EXISTS DECIMAL_PRECISION +POSTHOOK: type: DROPTABLE +PREHOOK: query: CREATE TABLE DECIMAL_PRECISION_txt(dec decimal(20,10)) +ROW FORMAT DELIMITED + FIELDS TERMINATED BY ' ' +STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@DECIMAL_PRECISION_txt +POSTHOOK: query: CREATE TABLE DECIMAL_PRECISION_txt(dec decimal(20,10)) +ROW FORMAT DELIMITED + FIELDS TERMINATED BY ' ' +STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@DECIMAL_PRECISION_txt +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv8.txt' INTO TABLE DECIMAL_PRECISION_txt +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@decimal_precision_txt +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv8.txt' INTO TABLE DECIMAL_PRECISION_txt +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@decimal_precision_txt +PREHOOK: query: CREATE TABLE DECIMAL_PRECISION(dec decimal(20,10)) +STORED AS ORC +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@DECIMAL_PRECISION +POSTHOOK: query: CREATE TABLE DECIMAL_PRECISION(dec decimal(20,10)) +STORED AS ORC +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@DECIMAL_PRECISION +PREHOOK: query: INSERT OVERWRITE TABLE DECIMAL_PRECISION SELECT * FROM DECIMAL_PRECISION_txt +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_precision_txt +PREHOOK: Output: default@decimal_precision +POSTHOOK: query: INSERT OVERWRITE TABLE DECIMAL_PRECISION SELECT * FROM DECIMAL_PRECISION_txt +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_precision_txt +POSTHOOK: Output: default@decimal_precision +POSTHOOK: Lineage: decimal_precision.dec SIMPLE [(decimal_precision_txt)decimal_precision_txt.FieldSchema(name:dec, type:decimal(20,10), comment:null), ] +PREHOOK: query: SELECT * FROM DECIMAL_PRECISION ORDER BY dec +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_precision +#### A masked pattern was here #### +POSTHOOK: query: SELECT * FROM DECIMAL_PRECISION ORDER BY dec +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_precision +#### A masked pattern was here #### +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +0.0000000000 +0.0000000000 +0.0000000000 +0.0000000000 +0 +0.1234567890 +0.1234567890 
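The block of NULL rows above comes from precision/scale enforcement on load: a value whose integer part needs more digits than decimal(20,10) allows is nulled out rather than truncated. A minimal BigDecimal sketch of that rule, consistent with the NULL rows here (an assumption about the mechanism, not Hive's actual implementation):

  import java.math.BigDecimal;
  import java.math.RoundingMode;

  public class EnforcePrecisionScaleSketch {
    static BigDecimal enforce(BigDecimal bd, int maxPrecision, int maxScale) {
      if (bd.scale() > maxScale) {
        bd = bd.setScale(maxScale, RoundingMode.HALF_UP); // shrink the fraction
      }
      // If the integer part still needs more digits than the type allows,
      // the value cannot be represented and becomes NULL.
      if (bd.precision() - bd.scale() > maxPrecision - maxScale) {
        return null;
      }
      return bd;
    }

    public static void main(String[] args) {
      // Fits decimal(20,10), survives as-is:
      System.out.println(enforce(new BigDecimal("1234567890.1234567890"), 20, 10));
      // 26 integer digits cannot fit in 10 -> null, i.e. a NULL row above:
      System.out.println(enforce(new BigDecimal("12345678901234567890123456.78"), 20, 10));
    }
  }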
+1.2345678901 +1.2345678901 +1.2345678901 +12.3456789012 +12.3456789012 +12.3456789012 +123.4567890123 +123.4567890123 +123.4567890123 +1234.5678901235 +1234.5678901235 +1234.5678901235 +12345.6789012346 +12345.6789012346 +123456.7890123456 +123456.7890123457 +1234567.890123456 +1234567.8901234568 +12345678.90123456 +12345678.9012345679 +123456789.0123456 +123456789.0123456789 +1234567890.123456 +1234567890.1234567890 +PREHOOK: query: SELECT dec, dec + 1, dec - 1 FROM DECIMAL_PRECISION ORDER BY dec +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_precision +#### A masked pattern was here #### +POSTHOOK: query: SELECT dec, dec + 1, dec - 1 FROM DECIMAL_PRECISION ORDER BY dec +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_precision +#### A masked pattern was here #### +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +0.0000000000 1.0000000000 -1.0000000000 +0.0000000000 1.0000000000 -1.0000000000 +0.0000000000 1.0000000000 -1.0000000000 +0.0000000000 1.0000000000 -1.0000000000 +0 1 -1 +0.1234567890 1.1234567890 -0.8765432110 +0.1234567890 1.1234567890 -0.8765432110 +1.2345678901 2.2345678901 0.2345678901 +1.2345678901 2.2345678901 0.2345678901 +1.2345678901 2.2345678901 0.2345678901 +12.3456789012 13.3456789012 11.3456789012 +12.3456789012 13.3456789012 11.3456789012 +12.3456789012 13.3456789012 11.3456789012 +123.4567890123 124.4567890123 122.4567890123 +123.4567890123 124.4567890123 122.4567890123 +123.4567890123 124.4567890123 122.4567890123 +1234.5678901235 1235.5678901235 1233.5678901235 +1234.5678901235 1235.5678901235 1233.5678901235 +1234.5678901235 1235.5678901235 1233.5678901235 +12345.6789012346 12346.6789012346 12344.6789012346 +12345.6789012346 12346.6789012346 12344.6789012346 +123456.7890123456 123457.7890123456 123455.7890123456 +123456.7890123457 123457.7890123457 123455.7890123457 +1234567.890123456 1234568.890123456 1234566.890123456 +1234567.8901234568 1234568.8901234568 1234566.8901234568 +12345678.90123456 12345679.90123456 12345677.90123456 +12345678.9012345679 12345679.9012345679 12345677.9012345679 +123456789.0123456 123456790.0123456 123456788.0123456 +123456789.0123456789 123456790.0123456789 123456788.0123456789 +1234567890.123456 1234567891.123456 1234567889.123456 +1234567890.1234567890 1234567891.1234567890 1234567889.1234567890 +PREHOOK: query: SELECT dec, dec * 2, dec / 3 FROM DECIMAL_PRECISION ORDER BY dec +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_precision +#### A masked pattern was here #### +POSTHOOK: query: SELECT dec, dec * 2, dec / 3 FROM DECIMAL_PRECISION ORDER BY dec +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_precision +#### A masked pattern was here #### +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL 
NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +0.0000000000 0.0000000000 0 +0.0000000000 0.0000000000 0 +0.0000000000 0.0000000000 0 +0.0000000000 0.0000000000 0 +0 0 0 +0.1234567890 0.2469135780 0.041152263 +0.1234567890 0.2469135780 0.041152263 +1.2345678901 2.4691357802 0.411522630033 +1.2345678901 2.4691357802 0.411522630033 +1.2345678901 2.4691357802 0.411522630033 +12.3456789012 24.6913578024 4.1152263004 +12.3456789012 24.6913578024 4.1152263004 +12.3456789012 24.6913578024 4.1152263004 +123.4567890123 246.9135780246 41.1522630041 +123.4567890123 246.9135780246 41.1522630041 +123.4567890123 246.9135780246 41.1522630041 +1234.5678901235 2469.1357802470 411.522630041167 +1234.5678901235 2469.1357802470 411.522630041167 +1234.5678901235 2469.1357802470 411.522630041167 +12345.6789012346 24691.3578024692 4115.226300411533 +12345.6789012346 24691.3578024692 4115.226300411533 +123456.7890123456 246913.5780246912 41152.2630041152 +123456.7890123457 246913.5780246914 41152.263004115233 +1234567.890123456 2469135.780246912 411522.630041152 +1234567.8901234568 2469135.7802469136 411522.630041152267 +12345678.90123456 24691357.80246912 4115226.30041152 +12345678.9012345679 24691357.8024691358 4115226.300411522633 +123456789.0123456 246913578.0246912 41152263.0041152 +123456789.0123456789 246913578.0246913578 41152263.0041152263 +1234567890.123456 2469135780.246912 411522630.041152 +1234567890.1234567890 2469135780.2469135780 411522630.041152263 +PREHOOK: query: SELECT dec, dec / 9 FROM DECIMAL_PRECISION ORDER BY dec +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_precision +#### A masked pattern was here #### +POSTHOOK: query: SELECT dec, dec / 9 FROM DECIMAL_PRECISION ORDER BY dec +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_precision +#### A masked pattern was here #### +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +0.0000000000 0 +0.0000000000 0 +0.0000000000 0 +0.0000000000 0 +0 0 +0.1234567890 0.013717421 +0.1234567890 0.013717421 +1.2345678901 0.137174210011 +1.2345678901 0.137174210011 +1.2345678901 0.137174210011 +12.3456789012 1.371742100133 +12.3456789012 1.371742100133 +12.3456789012 1.371742100133 +123.4567890123 13.717421001367 +123.4567890123 13.717421001367 +123.4567890123 13.717421001367 +1234.5678901235 137.174210013722 +1234.5678901235 137.174210013722 +1234.5678901235 137.174210013722 +12345.6789012346 1371.742100137178 +12345.6789012346 1371.742100137178 +123456.7890123456 13717.421001371733 +123456.7890123457 13717.421001371744 +1234567.890123456 137174.210013717333 +1234567.8901234568 137174.210013717422 +12345678.90123456 1371742.100137173333 +12345678.9012345679 1371742.100137174211 
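The division rows above carry more fractional digits than the operand: Hive widens the result scale for `/` and rounds half-up at that scale. A short sketch reproducing one row above, assuming HALF_UP and the twelve-digit result scale visible in the output:

  import java.math.BigDecimal;
  import java.math.RoundingMode;

  public class DecimalDivideSketch {
    public static void main(String[] args) {
      BigDecimal dec = new BigDecimal("12345.6789012346");
      // Printed above as 1371.742100137178: twelve fractional digits, half-up.
      System.out.println(dec.divide(new BigDecimal("9"), 12, RoundingMode.HALF_UP));
    }
  }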
+123456789.0123456 13717421.001371733333 +123456789.0123456789 13717421.0013717421 +1234567890.123456 137174210.013717333333 +1234567890.1234567890 137174210.013717421 +PREHOOK: query: SELECT dec, dec / 27 FROM DECIMAL_PRECISION ORDER BY dec +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_precision +#### A masked pattern was here #### +POSTHOOK: query: SELECT dec, dec / 27 FROM DECIMAL_PRECISION ORDER BY dec +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_precision +#### A masked pattern was here #### +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +0.0000000000 0 +0.0000000000 0 +0.0000000000 0 +0.0000000000 0 +0 0 +0.1234567890 0.0045724736667 +0.1234567890 0.0045724736667 +1.2345678901 0.0457247366704 +1.2345678901 0.0457247366704 +1.2345678901 0.0457247366704 +12.3456789012 0.4572473667111 +12.3456789012 0.4572473667111 +12.3456789012 0.4572473667111 +123.4567890123 4.5724736671222 +123.4567890123 4.5724736671222 +123.4567890123 4.5724736671222 +1234.5678901235 45.7247366712407 +1234.5678901235 45.7247366712407 +1234.5678901235 45.7247366712407 +12345.6789012346 457.2473667123926 +12345.6789012346 457.2473667123926 +123456.7890123456 4572.4736671239111 +123456.7890123457 4572.4736671239148 +1234567.890123456 45724.7366712391111 +1234567.8901234568 45724.7366712391407 +12345678.90123456 457247.3667123911111 +12345678.9012345679 457247.3667123914037 +123456789.0123456 4572473.6671239111111 +123456789.0123456789 4572473.6671239140333 +1234567890.123456 45724736.6712391111111 +1234567890.1234567890 45724736.6712391403333 +PREHOOK: query: SELECT dec, dec * dec FROM DECIMAL_PRECISION ORDER BY dec +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_precision +#### A masked pattern was here #### +POSTHOOK: query: SELECT dec, dec * dec FROM DECIMAL_PRECISION ORDER BY dec +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_precision +#### A masked pattern was here #### +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +0.0000000000 0.00000000000000000000 +0.0000000000 0.00000000000000000000 +0.0000000000 0.00000000000000000000 +0.0000000000 0.00000000000000000000 +0 0 +0.1234567890 0.01524157875019052100 +0.1234567890 0.01524157875019052100 +1.2345678901 1.52415787526596567801 +1.2345678901 1.52415787526596567801 +1.2345678901 1.52415787526596567801 +12.3456789012 152.41578753153483936144 +12.3456789012 152.41578753153483936144 +12.3456789012 152.41578753153483936144 +123.4567890123 15241.57875322755800955129 +123.4567890123 15241.57875322755800955129 +123.4567890123 15241.57875322755800955129 +1234.5678901235 1524157.87532399036884525225 +1234.5678901235 1524157.87532399036884525225 +1234.5678901235 
1524157.87532399036884525225 +12345.6789012346 152415787.53238916034140423716 +12345.6789012346 152415787.53238916034140423716 +123456.7890123456 15241578753.23881726870921383936 +123456.7890123457 15241578753.23884196006701630849 +1234567.890123456 1524157875323.881726870921383936 +1234567.8901234568 1524157875323.88370217954558146624 +12345678.90123456 152415787532388.1726870921383936 +12345678.9012345679 152415787532388.36774881877789971041 +123456789.0123456 15241578753238817.26870921383936 +123456789.0123456789 15241578753238836.75019051998750190521 +1234567890.123456 NULL +1234567890.1234567890 NULL +PREHOOK: query: EXPLAIN SELECT avg(dec), sum(dec) FROM DECIMAL_PRECISION +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN SELECT avg(dec), sum(dec) FROM DECIMAL_PRECISION +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: decimal_precision + Statistics: Num rows: 75 Data size: 3472 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: dec (type: decimal(20,10)) + outputColumnNames: dec + Statistics: Num rows: 75 Data size: 3472 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: avg(dec), sum(dec) + mode: hash + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: struct), _col1 (type: decimal(30,10)) + Execution mode: vectorized + Reducer 2 + Reduce Operator Tree: + Group By Operator + aggregations: avg(VALUE._col0), sum(VALUE._col1) + mode: mergepartial + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1 Data size: 224 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: decimal(24,14)), _col1 (type: decimal(30,10)) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1 Data size: 224 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 224 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT avg(dec), sum(dec) FROM DECIMAL_PRECISION +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_precision +#### A masked pattern was here #### +POSTHOOK: query: SELECT avg(dec), sum(dec) FROM DECIMAL_PRECISION +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_precision +#### A masked pattern was here #### +88499534.57586576220645 2743485571.8518386284 +PREHOOK: query: SELECT dec * cast('12345678901234567890.12345678' as decimal(38,18)) FROM DECIMAL_PRECISION LIMIT 1 +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_precision +#### A masked pattern was here #### +POSTHOOK: query: SELECT dec * cast('12345678901234567890.12345678' as decimal(38,18)) FROM DECIMAL_PRECISION LIMIT 1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_precision +#### A masked pattern was here #### +NULL +PREHOOK: query: SELECT * from DECIMAL_PRECISION WHERE dec > cast('1234567890123456789012345678.12345678' as 
decimal(38,18)) LIMIT 1 +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_precision +#### A masked pattern was here #### +POSTHOOK: query: SELECT * from DECIMAL_PRECISION WHERE dec > cast('1234567890123456789012345678.12345678' as decimal(38,18)) LIMIT 1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_precision +#### A masked pattern was here #### +PREHOOK: query: SELECT dec * 12345678901234567890.12345678 FROM DECIMAL_PRECISION LIMIT 1 +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_precision +#### A masked pattern was here #### +POSTHOOK: query: SELECT dec * 12345678901234567890.12345678 FROM DECIMAL_PRECISION LIMIT 1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_precision +#### A masked pattern was here #### +NULL +PREHOOK: query: SELECT MIN(cast('12345678901234567890.12345678' as decimal(38,18))) FROM DECIMAL_PRECISION +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_precision +#### A masked pattern was here #### +POSTHOOK: query: SELECT MIN(cast('12345678901234567890.12345678' as decimal(38,18))) FROM DECIMAL_PRECISION +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_precision +#### A masked pattern was here #### +12345678901234567890.12345678 +PREHOOK: query: SELECT COUNT(cast('12345678901234567890.12345678' as decimal(38,18))) FROM DECIMAL_PRECISION +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_precision +#### A masked pattern was here #### +POSTHOOK: query: SELECT COUNT(cast('12345678901234567890.12345678' as decimal(38,18))) FROM DECIMAL_PRECISION +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_precision +#### A masked pattern was here #### +75 +PREHOOK: query: DROP TABLE DECIMAL_PRECISION_txt +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@decimal_precision_txt +PREHOOK: Output: default@decimal_precision_txt +POSTHOOK: query: DROP TABLE DECIMAL_PRECISION_txt +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@decimal_precision_txt +POSTHOOK: Output: default@decimal_precision_txt +PREHOOK: query: DROP TABLE DECIMAL_PRECISION +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@decimal_precision +PREHOOK: Output: default@decimal_precision +POSTHOOK: query: DROP TABLE DECIMAL_PRECISION +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@decimal_precision +POSTHOOK: Output: default@decimal_precision Index: ql/src/test/results/clientpositive/tez/vector_decimal_round.q.out =================================================================== --- ql/src/test/results/clientpositive/tez/vector_decimal_round.q.out (revision 0) +++ ql/src/test/results/clientpositive/tez/vector_decimal_round.q.out (working copy) @@ -0,0 +1,456 @@ +PREHOOK: query: create table decimal_tbl_txt (dec decimal(10,0)) +ROW FORMAT DELIMITED + FIELDS TERMINATED BY ' ' +STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@decimal_tbl_txt +POSTHOOK: query: create table decimal_tbl_txt (dec decimal(10,0)) +ROW FORMAT DELIMITED + FIELDS TERMINATED BY ' ' +STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@decimal_tbl_txt +PREHOOK: query: insert into table decimal_tbl_txt values(101) +PREHOOK: type: QUERY +PREHOOK: Input: default@values__tmp__table__1 +PREHOOK: Output: default@decimal_tbl_txt +POSTHOOK: query: insert into table decimal_tbl_txt values(101) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@values__tmp__table__1 +POSTHOOK: Output: default@decimal_tbl_txt +POSTHOOK: Lineage: decimal_tbl_txt.dec EXPRESSION 
[(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ] +PREHOOK: query: select * from decimal_tbl_txt +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_tbl_txt +#### A masked pattern was here #### +POSTHOOK: query: select * from decimal_tbl_txt +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_tbl_txt +#### A masked pattern was here #### +101 +PREHOOK: query: explain +select dec, round(dec, -1) from decimal_tbl_txt order by dec +PREHOOK: type: QUERY +POSTHOOK: query: explain +select dec, round(dec, -1) from decimal_tbl_txt order by dec +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: decimal_tbl_txt + Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: dec (type: decimal(10,0)), round(dec, -1) (type: decimal(11,0)) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: decimal(10,0)) + sort order: + + Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE + value expressions: _col1 (type: decimal(11,0)) + Reducer 2 + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: decimal(10,0)), VALUE._col0 (type: decimal(11,0)) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select dec, round(dec, -1) from decimal_tbl_txt order by dec +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_tbl_txt +#### A masked pattern was here #### +POSTHOOK: query: select dec, round(dec, -1) from decimal_tbl_txt order by dec +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_tbl_txt +#### A masked pattern was here #### +101 100 +PREHOOK: query: explain +select dec, round(dec, -1) from decimal_tbl_txt order by round(dec, -1) +PREHOOK: type: QUERY +POSTHOOK: query: explain +select dec, round(dec, -1) from decimal_tbl_txt order by round(dec, -1) +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: decimal_tbl_txt + Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: dec (type: decimal(10,0)) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: round(_col0, (- 1)) (type: decimal(11,0)) + sort order: + + Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: decimal(10,0)) + Reducer 2 + Reduce Operator Tree: + Select Operator + expressions: 
VALUE._col0 (type: decimal(10,0)), KEY.reducesinkkey0 (type: decimal(11,0)) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select dec, round(dec, -1) from decimal_tbl_txt order by round(dec, -1) +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_tbl_txt +#### A masked pattern was here #### +POSTHOOK: query: select dec, round(dec, -1) from decimal_tbl_txt order by round(dec, -1) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_tbl_txt +#### A masked pattern was here #### +101 100 +PREHOOK: query: create table decimal_tbl_rc (dec decimal(10,0)) +row format serde 'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe' stored as rcfile +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@decimal_tbl_rc +POSTHOOK: query: create table decimal_tbl_rc (dec decimal(10,0)) +row format serde 'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe' stored as rcfile +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@decimal_tbl_rc +PREHOOK: query: insert into table decimal_tbl_rc values(101) +PREHOOK: type: QUERY +PREHOOK: Input: default@values__tmp__table__2 +PREHOOK: Output: default@decimal_tbl_rc +POSTHOOK: query: insert into table decimal_tbl_rc values(101) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@values__tmp__table__2 +POSTHOOK: Output: default@decimal_tbl_rc +POSTHOOK: Lineage: decimal_tbl_rc.dec EXPRESSION [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col1, type:string, comment:), ] +PREHOOK: query: select * from decimal_tbl_rc +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_tbl_rc +#### A masked pattern was here #### +POSTHOOK: query: select * from decimal_tbl_rc +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_tbl_rc +#### A masked pattern was here #### +101 +PREHOOK: query: explain +select dec, round(dec, -1) from decimal_tbl_rc order by dec +PREHOOK: type: QUERY +POSTHOOK: query: explain +select dec, round(dec, -1) from decimal_tbl_rc order by dec +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: decimal_tbl_rc + Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: dec (type: decimal(10,0)), round(dec, -1) (type: decimal(11,0)) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: decimal(10,0)) + sort order: + + Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE + value expressions: _col1 (type: decimal(11,0)) + Reducer 2 + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: decimal(10,0)), VALUE._col0 (type: decimal(11,0)) + outputColumnNames: _col0, _col1 + 
Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select dec, round(dec, -1) from decimal_tbl_rc order by dec +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_tbl_rc +#### A masked pattern was here #### +POSTHOOK: query: select dec, round(dec, -1) from decimal_tbl_rc order by dec +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_tbl_rc +#### A masked pattern was here #### +101 100 +PREHOOK: query: explain +select dec, round(dec, -1) from decimal_tbl_rc order by round(dec, -1) +PREHOOK: type: QUERY +POSTHOOK: query: explain +select dec, round(dec, -1) from decimal_tbl_rc order by round(dec, -1) +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: decimal_tbl_rc + Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: dec (type: decimal(10,0)) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: round(_col0, (- 1)) (type: decimal(11,0)) + sort order: + + Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: decimal(10,0)) + Reducer 2 + Reduce Operator Tree: + Select Operator + expressions: VALUE._col0 (type: decimal(10,0)), KEY.reducesinkkey0 (type: decimal(11,0)) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select dec, round(dec, -1) from decimal_tbl_rc order by round(dec, -1) +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_tbl_rc +#### A masked pattern was here #### +POSTHOOK: query: select dec, round(dec, -1) from decimal_tbl_rc order by round(dec, -1) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_tbl_rc +#### A masked pattern was here #### +101 100 +PREHOOK: query: create table decimal_tbl_orc (dec decimal(10,0)) +stored as orc +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@decimal_tbl_orc +POSTHOOK: query: create table decimal_tbl_orc (dec decimal(10,0)) +stored as orc +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@decimal_tbl_orc +PREHOOK: query: insert into table decimal_tbl_orc values(101) +PREHOOK: type: QUERY +PREHOOK: Input: default@values__tmp__table__3 +PREHOOK: Output: default@decimal_tbl_orc +POSTHOOK: query: insert into table decimal_tbl_orc 
values(101) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@values__tmp__table__3 +POSTHOOK: Output: default@decimal_tbl_orc +POSTHOOK: Lineage: decimal_tbl_orc.dec EXPRESSION [(values__tmp__table__3)values__tmp__table__3.FieldSchema(name:tmp_values_col1, type:string, comment:), ] +PREHOOK: query: select * from decimal_tbl_orc +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_tbl_orc +#### A masked pattern was here #### +POSTHOOK: query: select * from decimal_tbl_orc +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_tbl_orc +#### A masked pattern was here #### +101 +PREHOOK: query: explain +select dec, round(dec, -1) from decimal_tbl_orc order by dec +PREHOOK: type: QUERY +POSTHOOK: query: explain +select dec, round(dec, -1) from decimal_tbl_orc order by dec +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: decimal_tbl_orc + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: dec (type: decimal(10,0)), round(dec, -1) (type: decimal(11,0)) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: decimal(10,0)) + sort order: + + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + value expressions: _col1 (type: decimal(11,0)) + Execution mode: vectorized + Reducer 2 + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: decimal(10,0)), VALUE._col0 (type: decimal(11,0)) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select dec, round(dec, -1) from decimal_tbl_orc order by dec +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_tbl_orc +#### A masked pattern was here #### +POSTHOOK: query: select dec, round(dec, -1) from decimal_tbl_orc order by dec +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_tbl_orc +#### A masked pattern was here #### +101 100 +PREHOOK: query: explain +select dec, round(dec, -1) from decimal_tbl_orc order by round(dec, -1) +PREHOOK: type: QUERY +POSTHOOK: query: explain +select dec, round(dec, -1) from decimal_tbl_orc order by round(dec, -1) +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: decimal_tbl_orc + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: dec (type: decimal(10,0)) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: round(_col0, (- 1)) (type: decimal(11,0)) 
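The `101 100` rows above are ordinary negative-scale rounding: round(dec, -1) moves the rounding position to the tens digit. BigDecimal models this directly, so a one-line sketch suffices (assuming Hive's round matches HALF_UP here):

  import java.math.BigDecimal;
  import java.math.RoundingMode;

  public class NegativeScaleRound {
    public static void main(String[] args) {
      // round(101, -1): the rounding position is the tens digit.
      BigDecimal r = new BigDecimal("101").setScale(-1, RoundingMode.HALF_UP);
      System.out.println(r.toPlainString()); // 100
    }
  }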
+ sort order: + + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: decimal(10,0)) + Execution mode: vectorized + Reducer 2 + Reduce Operator Tree: + Select Operator + expressions: VALUE._col0 (type: decimal(10,0)), KEY.reducesinkkey0 (type: decimal(11,0)) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select dec, round(dec, -1) from decimal_tbl_orc order by round(dec, -1) +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_tbl_orc +#### A masked pattern was here #### +POSTHOOK: query: select dec, round(dec, -1) from decimal_tbl_orc order by round(dec, -1) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_tbl_orc +#### A masked pattern was here #### +101 100 Index: ql/src/test/results/clientpositive/tez/vector_decimal_round_2.q.out =================================================================== --- ql/src/test/results/clientpositive/tez/vector_decimal_round_2.q.out (revision 0) +++ ql/src/test/results/clientpositive/tez/vector_decimal_round_2.q.out (working copy) @@ -0,0 +1,499 @@ +PREHOOK: query: create table decimal_tbl_1_orc (dec decimal(38,18)) +STORED AS ORC +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@decimal_tbl_1_orc +POSTHOOK: query: create table decimal_tbl_1_orc (dec decimal(38,18)) +STORED AS ORC +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@decimal_tbl_1_orc +PREHOOK: query: insert into table decimal_tbl_1_orc values(55555) +PREHOOK: type: QUERY +PREHOOK: Input: default@values__tmp__table__1 +PREHOOK: Output: default@decimal_tbl_1_orc +POSTHOOK: query: insert into table decimal_tbl_1_orc values(55555) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@values__tmp__table__1 +POSTHOOK: Output: default@decimal_tbl_1_orc +POSTHOOK: Lineage: decimal_tbl_1_orc.dec EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ] +PREHOOK: query: select * from decimal_tbl_1_orc +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_tbl_1_orc +#### A masked pattern was here #### +POSTHOOK: query: select * from decimal_tbl_1_orc +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_tbl_1_orc +#### A masked pattern was here #### +55555 +PREHOOK: query: -- EXPLAIN +-- SELECT dec, round(null), round(null, 0), round(125, null), +-- round(1.0/0.0, 0), round(power(-1.0,0.5), 0) +-- FROM decimal_tbl_1_orc ORDER BY dec; + +-- SELECT dec, round(null), round(null, 0), round(125, null), +-- round(1.0/0.0, 0), round(power(-1.0,0.5), 0) +-- FROM decimal_tbl_1_orc ORDER BY dec; + +EXPLAIN +SELECT + round(dec) as d, round(dec, 0), round(dec, 1), round(dec, 2), round(dec, 3), + round(dec, -1), round(dec, -2), round(dec, -3), round(dec, -4), + round(dec, -5), round(dec, -6), round(dec, -7), round(dec, -8) +FROM decimal_tbl_1_orc ORDER BY d +PREHOOK: type: QUERY +POSTHOOK: query: -- EXPLAIN +-- SELECT dec, round(null), round(null, 0), round(125, null), +-- round(1.0/0.0, 0), 
round(power(-1.0,0.5), 0) +-- FROM decimal_tbl_1_orc ORDER BY dec; + +-- SELECT dec, round(null), round(null, 0), round(125, null), +-- round(1.0/0.0, 0), round(power(-1.0,0.5), 0) +-- FROM decimal_tbl_1_orc ORDER BY dec; + +EXPLAIN +SELECT + round(dec) as d, round(dec, 0), round(dec, 1), round(dec, 2), round(dec, 3), + round(dec, -1), round(dec, -2), round(dec, -3), round(dec, -4), + round(dec, -5), round(dec, -6), round(dec, -7), round(dec, -8) +FROM decimal_tbl_1_orc ORDER BY d +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: decimal_tbl_1_orc + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: round(dec) (type: decimal(21,0)), round(dec, 0) (type: decimal(21,0)), round(dec, 1) (type: decimal(22,1)), round(dec, 2) (type: decimal(23,2)), round(dec, 3) (type: decimal(24,3)), round(dec, -1) (type: decimal(21,0)), round(dec, -2) (type: decimal(21,0)), round(dec, -3) (type: decimal(21,0)), round(dec, -4) (type: decimal(21,0)), round(dec, -5) (type: decimal(21,0)), round(dec, -6) (type: decimal(21,0)), round(dec, -7) (type: decimal(21,0)), round(dec, -8) (type: decimal(21,0)) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: decimal(21,0)) + sort order: + + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + value expressions: _col1 (type: decimal(21,0)), _col2 (type: decimal(22,1)), _col3 (type: decimal(23,2)), _col4 (type: decimal(24,3)), _col5 (type: decimal(21,0)), _col6 (type: decimal(21,0)), _col7 (type: decimal(21,0)), _col8 (type: decimal(21,0)), _col9 (type: decimal(21,0)), _col10 (type: decimal(21,0)), _col11 (type: decimal(21,0)), _col12 (type: decimal(21,0)) + Execution mode: vectorized + Reducer 2 + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: decimal(21,0)), VALUE._col0 (type: decimal(21,0)), VALUE._col1 (type: decimal(22,1)), VALUE._col2 (type: decimal(23,2)), VALUE._col3 (type: decimal(24,3)), VALUE._col4 (type: decimal(21,0)), VALUE._col5 (type: decimal(21,0)), VALUE._col6 (type: decimal(21,0)), VALUE._col7 (type: decimal(21,0)), VALUE._col8 (type: decimal(21,0)), VALUE._col9 (type: decimal(21,0)), VALUE._col10 (type: decimal(21,0)), VALUE._col11 (type: decimal(21,0)) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT + round(dec) as d, round(dec, 0), round(dec, 1), round(dec, 2), round(dec, 3), + round(dec, -1), round(dec, -2), round(dec, -3), round(dec, -4), + round(dec, -5), round(dec, -6), round(dec, -7), round(dec, 
-8) +FROM decimal_tbl_1_orc ORDER BY d +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_tbl_1_orc +#### A masked pattern was here #### +POSTHOOK: query: SELECT + round(dec) as d, round(dec, 0), round(dec, 1), round(dec, 2), round(dec, 3), + round(dec, -1), round(dec, -2), round(dec, -3), round(dec, -4), + round(dec, -5), round(dec, -6), round(dec, -7), round(dec, -8) +FROM decimal_tbl_1_orc ORDER BY d +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_tbl_1_orc +#### A masked pattern was here #### +55555 55555 55555.0 55555.00 55555.000 55560 55600 56000 60000 100000 0 0 0 +PREHOOK: query: create table decimal_tbl_2_orc (pos decimal(38,18), neg decimal(38,18)) +STORED AS ORC +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@decimal_tbl_2_orc +POSTHOOK: query: create table decimal_tbl_2_orc (pos decimal(38,18), neg decimal(38,18)) +STORED AS ORC +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@decimal_tbl_2_orc +PREHOOK: query: insert into table decimal_tbl_2_orc values(125.315, -125.315) +PREHOOK: type: QUERY +PREHOOK: Input: default@values__tmp__table__2 +PREHOOK: Output: default@decimal_tbl_2_orc +POSTHOOK: query: insert into table decimal_tbl_2_orc values(125.315, -125.315) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@values__tmp__table__2 +POSTHOOK: Output: default@decimal_tbl_2_orc +POSTHOOK: Lineage: decimal_tbl_2_orc.neg EXPRESSION [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col2, type:string, comment:), ] +POSTHOOK: Lineage: decimal_tbl_2_orc.pos EXPRESSION [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col1, type:string, comment:), ] +PREHOOK: query: select * from decimal_tbl_2_orc +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_tbl_2_orc +#### A masked pattern was here #### +POSTHOOK: query: select * from decimal_tbl_2_orc +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_tbl_2_orc +#### A masked pattern was here #### +125.315 -125.315 +PREHOOK: query: EXPLAIN +SELECT + round(pos) as p, round(pos, 0), + round(pos, 1), round(pos, 2), round(pos, 3), round(pos, 4), + round(pos, -1), round(pos, -2), round(pos, -3), round(pos, -4), + round(neg), round(neg, 0), + round(neg, 1), round(neg, 2), round(neg, 3), round(neg, 4), + round(neg, -1), round(neg, -2), round(neg, -3), round(neg, -4) +FROM decimal_tbl_2_orc ORDER BY p +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN +SELECT + round(pos) as p, round(pos, 0), + round(pos, 1), round(pos, 2), round(pos, 3), round(pos, 4), + round(pos, -1), round(pos, -2), round(pos, -3), round(pos, -4), + round(neg), round(neg, 0), + round(neg, 1), round(neg, 2), round(neg, 3), round(neg, 4), + round(neg, -1), round(neg, -2), round(neg, -3), round(neg, -4) +FROM decimal_tbl_2_orc ORDER BY p +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: decimal_tbl_2_orc + Statistics: Num rows: 1 Data size: 224 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: round(pos) (type: decimal(21,0)), round(pos, 0) (type: decimal(21,0)), round(pos, 1) (type: decimal(22,1)), round(pos, 2) (type: decimal(23,2)), round(pos, 3) (type: decimal(24,3)), round(pos, 4) (type: decimal(25,4)), round(pos, -1) (type: decimal(21,0)), round(pos, -2) (type: 
decimal(21,0)), round(pos, -3) (type: decimal(21,0)), round(pos, -4) (type: decimal(21,0)), round(neg) (type: decimal(21,0)), round(neg, 0) (type: decimal(21,0)), round(neg, 1) (type: decimal(22,1)), round(neg, 2) (type: decimal(23,2)), round(neg, 3) (type: decimal(24,3)), round(neg, 4) (type: decimal(25,4)), round(neg, -1) (type: decimal(21,0)), round(neg, -2) (type: decimal(21,0)), round(neg, -3) (type: decimal(21,0)), round(neg, -4) (type: decimal(21,0)) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19 + Statistics: Num rows: 1 Data size: 224 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: decimal(21,0)) + sort order: + + Statistics: Num rows: 1 Data size: 224 Basic stats: COMPLETE Column stats: NONE + value expressions: _col1 (type: decimal(21,0)), _col2 (type: decimal(22,1)), _col3 (type: decimal(23,2)), _col4 (type: decimal(24,3)), _col5 (type: decimal(25,4)), _col6 (type: decimal(21,0)), _col7 (type: decimal(21,0)), _col8 (type: decimal(21,0)), _col9 (type: decimal(21,0)), _col10 (type: decimal(21,0)), _col11 (type: decimal(21,0)), _col12 (type: decimal(22,1)), _col13 (type: decimal(23,2)), _col14 (type: decimal(24,3)), _col15 (type: decimal(25,4)), _col16 (type: decimal(21,0)), _col17 (type: decimal(21,0)), _col18 (type: decimal(21,0)), _col19 (type: decimal(21,0)) + Execution mode: vectorized + Reducer 2 + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: decimal(21,0)), VALUE._col0 (type: decimal(21,0)), VALUE._col1 (type: decimal(22,1)), VALUE._col2 (type: decimal(23,2)), VALUE._col3 (type: decimal(24,3)), VALUE._col4 (type: decimal(25,4)), VALUE._col5 (type: decimal(21,0)), VALUE._col6 (type: decimal(21,0)), VALUE._col7 (type: decimal(21,0)), VALUE._col8 (type: decimal(21,0)), VALUE._col9 (type: decimal(21,0)), VALUE._col10 (type: decimal(21,0)), VALUE._col11 (type: decimal(22,1)), VALUE._col12 (type: decimal(23,2)), VALUE._col13 (type: decimal(24,3)), VALUE._col14 (type: decimal(25,4)), VALUE._col15 (type: decimal(21,0)), VALUE._col16 (type: decimal(21,0)), VALUE._col17 (type: decimal(21,0)), VALUE._col18 (type: decimal(21,0)) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19 + Statistics: Num rows: 1 Data size: 224 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 224 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT + round(pos) as p, round(pos, 0), + round(pos, 1), round(pos, 2), round(pos, 3), round(pos, 4), + round(pos, -1), round(pos, -2), round(pos, -3), round(pos, -4), + round(neg), round(neg, 0), + round(neg, 1), round(neg, 2), round(neg, 3), round(neg, 4), + round(neg, -1), round(neg, -2), round(neg, -3), round(neg, -4) +FROM decimal_tbl_2_orc ORDER BY p +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_tbl_2_orc +#### A masked pattern was here #### +POSTHOOK: query: SELECT + round(pos) as p, round(pos, 0), + round(pos, 1), round(pos, 2), round(pos, 3), 
round(pos, 4), + round(pos, -1), round(pos, -2), round(pos, -3), round(pos, -4), + round(neg), round(neg, 0), + round(neg, 1), round(neg, 2), round(neg, 3), round(neg, 4), + round(neg, -1), round(neg, -2), round(neg, -3), round(neg, -4) +FROM decimal_tbl_2_orc ORDER BY p +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_tbl_2_orc +#### A masked pattern was here #### +125 125 125.3 125.32 125.315 125.3150 130 100 0 0 -125 -125 -125.3 -125.32 -125.315 -125.3150 -130 -100 0 0 +PREHOOK: query: create table decimal_tbl_3_orc (dec decimal(38,18)) +STORED AS ORC +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@decimal_tbl_3_orc +POSTHOOK: query: create table decimal_tbl_3_orc (dec decimal(38,18)) +STORED AS ORC +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@decimal_tbl_3_orc +PREHOOK: query: insert into table decimal_tbl_3_orc values(3.141592653589793) +PREHOOK: type: QUERY +PREHOOK: Input: default@values__tmp__table__3 +PREHOOK: Output: default@decimal_tbl_3_orc +POSTHOOK: query: insert into table decimal_tbl_3_orc values(3.141592653589793) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@values__tmp__table__3 +POSTHOOK: Output: default@decimal_tbl_3_orc +POSTHOOK: Lineage: decimal_tbl_3_orc.dec EXPRESSION [(values__tmp__table__3)values__tmp__table__3.FieldSchema(name:tmp_values_col1, type:string, comment:), ] +PREHOOK: query: select * from decimal_tbl_3_orc +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_tbl_3_orc +#### A masked pattern was here #### +POSTHOOK: query: select * from decimal_tbl_3_orc +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_tbl_3_orc +#### A masked pattern was here #### +3.141592653589793 +PREHOOK: query: EXPLAIN +SELECT + round(dec, -15) as d, round(dec, -16), + round(dec, -13), round(dec, -14), + round(dec, -11), round(dec, -12), + round(dec, -9), round(dec, -10), + round(dec, -7), round(dec, -8), + round(dec, -5), round(dec, -6), + round(dec, -3), round(dec, -4), + round(dec, -1), round(dec, -2), + round(dec, 0), round(dec, 1), + round(dec, 2), round(dec, 3), + round(dec, 4), round(dec, 5), + round(dec, 6), round(dec, 7), + round(dec, 8), round(dec, 9), + round(dec, 10), round(dec, 11), + round(dec, 12), round(dec, 13), + round(dec, 13), round(dec, 14), + round(dec, 15), round(dec, 16) +FROM decimal_tbl_3_orc ORDER BY d +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN +SELECT + round(dec, -15) as d, round(dec, -16), + round(dec, -13), round(dec, -14), + round(dec, -11), round(dec, -12), + round(dec, -9), round(dec, -10), + round(dec, -7), round(dec, -8), + round(dec, -5), round(dec, -6), + round(dec, -3), round(dec, -4), + round(dec, -1), round(dec, -2), + round(dec, 0), round(dec, 1), + round(dec, 2), round(dec, 3), + round(dec, 4), round(dec, 5), + round(dec, 6), round(dec, 7), + round(dec, 8), round(dec, 9), + round(dec, 10), round(dec, 11), + round(dec, 12), round(dec, 13), + round(dec, 13), round(dec, 14), + round(dec, 15), round(dec, 16) +FROM decimal_tbl_3_orc ORDER BY d +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: decimal_tbl_3_orc + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: round(dec, -15) (type: decimal(21,0)), round(dec, -16) (type: 
decimal(21,0)), round(dec, -5) (type: decimal(21,0)), round(dec, -6) (type: decimal(21,0)), round(dec, -3) (type: decimal(21,0)), round(dec, -4) (type: decimal(21,0)), round(dec, -1) (type: decimal(21,0)), round(dec, -2) (type: decimal(21,0)), round(dec, 0) (type: decimal(21,0)), round(dec, 1) (type: decimal(22,1)), round(dec, 2) (type: decimal(23,2)), round(dec, 3) (type: decimal(24,3)), round(dec, -13) (type: decimal(21,0)), round(dec, 4) (type: decimal(25,4)), round(dec, 5) (type: decimal(26,5)), round(dec, 6) (type: decimal(27,6)), round(dec, 7) (type: decimal(28,7)), round(dec, 8) (type: decimal(29,8)), round(dec, 9) (type: decimal(30,9)), round(dec, 10) (type: decimal(31,10)), round(dec, 11) (type: decimal(32,11)), round(dec, 12) (type: decimal(33,12)), round(dec, 13) (type: decimal(34,13)), round(dec, -14) (type: decimal(21,0)), round(dec, 14) (type: decimal(35,14)), round(dec, 15) (type: decimal(36,15)), round(dec, 16) (type: decimal(37,16)), round(dec, -11) (type: decimal(21,0)), round(dec, -12) (type: decimal(21,0)), round(dec, -9) (type: decimal(21,0)), round(dec, -10) (type: decimal(21,0)), round(dec, -7) (type: decimal(21,0)), round(dec, -8) (type: decimal(21,0)) + outputColumnNames: _col0, _col1, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col2, _col20, _col21, _col22, _col23, _col24, _col25, _col26, _col27, _col28, _col29, _col3, _col31, _col32, _col33, _col4, _col5, _col6, _col7, _col8, _col9 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: decimal(21,0)) + sort order: + + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + value expressions: _col1 (type: decimal(21,0)), _col2 (type: decimal(21,0)), _col3 (type: decimal(21,0)), _col4 (type: decimal(21,0)), _col5 (type: decimal(21,0)), _col6 (type: decimal(21,0)), _col7 (type: decimal(21,0)), _col8 (type: decimal(21,0)), _col9 (type: decimal(21,0)), _col10 (type: decimal(21,0)), _col11 (type: decimal(21,0)), _col12 (type: decimal(21,0)), _col13 (type: decimal(21,0)), _col14 (type: decimal(21,0)), _col15 (type: decimal(21,0)), _col16 (type: decimal(21,0)), _col17 (type: decimal(22,1)), _col18 (type: decimal(23,2)), _col19 (type: decimal(24,3)), _col20 (type: decimal(25,4)), _col21 (type: decimal(26,5)), _col22 (type: decimal(27,6)), _col23 (type: decimal(28,7)), _col24 (type: decimal(29,8)), _col25 (type: decimal(30,9)), _col26 (type: decimal(31,10)), _col27 (type: decimal(32,11)), _col28 (type: decimal(33,12)), _col29 (type: decimal(34,13)), _col31 (type: decimal(35,14)), _col32 (type: decimal(36,15)), _col33 (type: decimal(37,16)) + Execution mode: vectorized + Reducer 2 + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: decimal(21,0)), VALUE._col0 (type: decimal(21,0)), VALUE._col1 (type: decimal(21,0)), VALUE._col2 (type: decimal(21,0)), VALUE._col3 (type: decimal(21,0)), VALUE._col4 (type: decimal(21,0)), VALUE._col5 (type: decimal(21,0)), VALUE._col6 (type: decimal(21,0)), VALUE._col7 (type: decimal(21,0)), VALUE._col8 (type: decimal(21,0)), VALUE._col9 (type: decimal(21,0)), VALUE._col10 (type: decimal(21,0)), VALUE._col11 (type: decimal(21,0)), VALUE._col12 (type: decimal(21,0)), VALUE._col13 (type: decimal(21,0)), VALUE._col14 (type: decimal(21,0)), VALUE._col15 (type: decimal(21,0)), VALUE._col16 (type: decimal(22,1)), VALUE._col17 (type: decimal(23,2)), VALUE._col18 (type: decimal(24,3)), VALUE._col19 (type: decimal(25,4)), 
VALUE._col20 (type: decimal(26,5)), VALUE._col21 (type: decimal(27,6)), VALUE._col22 (type: decimal(28,7)), VALUE._col23 (type: decimal(29,8)), VALUE._col24 (type: decimal(30,9)), VALUE._col25 (type: decimal(31,10)), VALUE._col26 (type: decimal(32,11)), VALUE._col27 (type: decimal(33,12)), VALUE._col28 (type: decimal(34,13)), VALUE._col28 (type: decimal(34,13)), VALUE._col29 (type: decimal(35,14)), VALUE._col30 (type: decimal(36,15)), VALUE._col31 (type: decimal(37,16)) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32, _col33 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT + round(dec, -15) as d, round(dec, -16), + round(dec, -13), round(dec, -14), + round(dec, -11), round(dec, -12), + round(dec, -9), round(dec, -10), + round(dec, -7), round(dec, -8), + round(dec, -5), round(dec, -6), + round(dec, -3), round(dec, -4), + round(dec, -1), round(dec, -2), + round(dec, 0), round(dec, 1), + round(dec, 2), round(dec, 3), + round(dec, 4), round(dec, 5), + round(dec, 6), round(dec, 7), + round(dec, 8), round(dec, 9), + round(dec, 10), round(dec, 11), + round(dec, 12), round(dec, 13), + round(dec, 13), round(dec, 14), + round(dec, 15), round(dec, 16) +FROM decimal_tbl_3_orc ORDER BY d +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_tbl_3_orc +#### A masked pattern was here #### +POSTHOOK: query: SELECT + round(dec, -15) as d, round(dec, -16), + round(dec, -13), round(dec, -14), + round(dec, -11), round(dec, -12), + round(dec, -9), round(dec, -10), + round(dec, -7), round(dec, -8), + round(dec, -5), round(dec, -6), + round(dec, -3), round(dec, -4), + round(dec, -1), round(dec, -2), + round(dec, 0), round(dec, 1), + round(dec, 2), round(dec, 3), + round(dec, 4), round(dec, 5), + round(dec, 6), round(dec, 7), + round(dec, 8), round(dec, 9), + round(dec, 10), round(dec, 11), + round(dec, 12), round(dec, 13), + round(dec, 13), round(dec, 14), + round(dec, 15), round(dec, 16) +FROM decimal_tbl_3_orc ORDER BY d +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_tbl_3_orc +#### A masked pattern was here #### +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 3 3.1 3.14 3.142 3.1416 3.14159 3.141593 3.1415927 3.14159265 3.141592654 3.1415926536 3.14159265359 3.141592653590 3.1415926535898 3.1415926535898 3.14159265358979 3.141592653589793 3.1415926535897930 +PREHOOK: query: create table decimal_tbl_4_orc (pos decimal(38,18), neg decimal(38,18)) +STORED AS ORC +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@decimal_tbl_4_orc +POSTHOOK: query: create table decimal_tbl_4_orc (pos decimal(38,18), neg decimal(38,18)) +STORED AS ORC +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@decimal_tbl_4_orc +PREHOOK: query: insert into table decimal_tbl_4_orc values(1809242.3151111344, -1809242.3151111344) +PREHOOK: type: QUERY +PREHOOK: 
Input: default@values__tmp__table__4 +PREHOOK: Output: default@decimal_tbl_4_orc +POSTHOOK: query: insert into table decimal_tbl_4_orc values(1809242.3151111344, -1809242.3151111344) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@values__tmp__table__4 +POSTHOOK: Output: default@decimal_tbl_4_orc +POSTHOOK: Lineage: decimal_tbl_4_orc.neg EXPRESSION [(values__tmp__table__4)values__tmp__table__4.FieldSchema(name:tmp_values_col2, type:string, comment:), ] +POSTHOOK: Lineage: decimal_tbl_4_orc.pos EXPRESSION [(values__tmp__table__4)values__tmp__table__4.FieldSchema(name:tmp_values_col1, type:string, comment:), ] +PREHOOK: query: select * from decimal_tbl_4_orc +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_tbl_4_orc +#### A masked pattern was here #### +POSTHOOK: query: select * from decimal_tbl_4_orc +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_tbl_4_orc +#### A masked pattern was here #### +1809242.3151111344 -1809242.3151111344 +PREHOOK: query: EXPLAIN +SELECT round(pos, 9) as p, round(neg, 9), round(1809242.3151111344BD, 9), round(-1809242.3151111344BD, 9) +FROM decimal_tbl_4_orc ORDER BY p +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN +SELECT round(pos, 9) as p, round(neg, 9), round(1809242.3151111344BD, 9), round(-1809242.3151111344BD, 9) +FROM decimal_tbl_4_orc ORDER BY p +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: decimal_tbl_4_orc + Statistics: Num rows: 1 Data size: 224 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: round(pos, 9) (type: decimal(30,9)), round(neg, 9) (type: decimal(30,9)), round(1809242.3151111344, 9) (type: decimal(17,9)), round((- 1809242.3151111344), 9) (type: decimal(17,9)) + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 1 Data size: 224 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: decimal(30,9)) + sort order: + + Statistics: Num rows: 1 Data size: 224 Basic stats: COMPLETE Column stats: NONE + value expressions: _col1 (type: decimal(30,9)), _col2 (type: decimal(17,9)), _col3 (type: decimal(17,9)) + Reducer 2 + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: decimal(30,9)), VALUE._col0 (type: decimal(30,9)), VALUE._col1 (type: decimal(17,9)), VALUE._col2 (type: decimal(17,9)) + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 1 Data size: 224 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 224 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT round(pos, 9) as p, round(neg, 9), round(1809242.3151111344BD, 9), round(-1809242.3151111344BD, 9) +FROM decimal_tbl_4_orc ORDER BY p +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_tbl_4_orc +#### A masked pattern was here #### +POSTHOOK: query: SELECT round(pos, 9) as p, round(neg, 9), round(1809242.3151111344BD, 9), round(-1809242.3151111344BD, 9) +FROM decimal_tbl_4_orc ORDER BY p +POSTHOOK: type: QUERY 
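The plans above also document how Hive sizes the result of round() on a decimal: a decimal(38,18) input keeps its 20 integer digits plus one digit of carry headroom, so round(dec, s) is planned as decimal(21,0) for any non-positive s and decimal(21+s, s) for positive s, capped at the 38-digit maximum (hence decimal(30,9) for round(pos, 9)). The rounding mode is half-up, i.e. a trailing 5 rounds away from zero. A minimal java.math.BigDecimal sketch of that behavior — illustrative only (the class name RoundSketch is made up; this is not Hive's implementation):

    import java.math.BigDecimal;
    import java.math.RoundingMode;

    public class RoundSketch {
        public static void main(String[] args) {
            BigDecimal pos = new BigDecimal("1809242.3151111344");
            // round(pos, 9) -> 1809242.315111134 (the 10th fractional digit is 4, so it drops)
            System.out.println(pos.setScale(9, RoundingMode.HALF_UP).toPlainString());
            // round(pos, -2) -> 1809200 (a negative scale zeroes digits left of the point)
            System.out.println(pos.setScale(-2, RoundingMode.HALF_UP).toPlainString());
        }
    }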
+POSTHOOK: Input: default@decimal_tbl_4_orc +#### A masked pattern was here #### +1809242.315111134 -1809242.315111134 1809242.315111134 -1809242.315111134 Index: ql/src/test/results/clientpositive/tez/vector_decimal_trailing.q.out =================================================================== --- ql/src/test/results/clientpositive/tez/vector_decimal_trailing.q.out (revision 0) +++ ql/src/test/results/clientpositive/tez/vector_decimal_trailing.q.out (working copy) @@ -0,0 +1,121 @@ +PREHOOK: query: DROP TABLE IF EXISTS DECIMAL_TRAILING_txt +PREHOOK: type: DROPTABLE +POSTHOOK: query: DROP TABLE IF EXISTS DECIMAL_TRAILING_txt +POSTHOOK: type: DROPTABLE +PREHOOK: query: DROP TABLE IF EXISTS DECIMAL_TRAILING +PREHOOK: type: DROPTABLE +POSTHOOK: query: DROP TABLE IF EXISTS DECIMAL_TRAILING +POSTHOOK: type: DROPTABLE +PREHOOK: query: CREATE TABLE DECIMAL_TRAILING_txt ( + id int, + a decimal(10,4), + b decimal(15,8) + ) +ROW FORMAT DELIMITED + FIELDS TERMINATED BY ',' +STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@DECIMAL_TRAILING_txt +POSTHOOK: query: CREATE TABLE DECIMAL_TRAILING_txt ( + id int, + a decimal(10,4), + b decimal(15,8) + ) +ROW FORMAT DELIMITED + FIELDS TERMINATED BY ',' +STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@DECIMAL_TRAILING_txt +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv10.txt' INTO TABLE DECIMAL_TRAILING_txt +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@decimal_trailing_txt +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv10.txt' INTO TABLE DECIMAL_TRAILING_txt +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@decimal_trailing_txt +PREHOOK: query: CREATE TABLE DECIMAL_TRAILING ( + id int, + a decimal(10,4), + b decimal(15,8) + ) +STORED AS ORC +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@DECIMAL_TRAILING +POSTHOOK: query: CREATE TABLE DECIMAL_TRAILING ( + id int, + a decimal(10,4), + b decimal(15,8) + ) +STORED AS ORC +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@DECIMAL_TRAILING +PREHOOK: query: INSERT OVERWRITE TABLE DECIMAL_TRAILING SELECT * FROM DECIMAL_TRAILING_txt +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_trailing_txt +PREHOOK: Output: default@decimal_trailing +POSTHOOK: query: INSERT OVERWRITE TABLE DECIMAL_TRAILING SELECT * FROM DECIMAL_TRAILING_txt +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_trailing_txt +POSTHOOK: Output: default@decimal_trailing +POSTHOOK: Lineage: decimal_trailing.a SIMPLE [(decimal_trailing_txt)decimal_trailing_txt.FieldSchema(name:a, type:decimal(10,4), comment:null), ] +POSTHOOK: Lineage: decimal_trailing.b SIMPLE [(decimal_trailing_txt)decimal_trailing_txt.FieldSchema(name:b, type:decimal(15,8), comment:null), ] +POSTHOOK: Lineage: decimal_trailing.id SIMPLE [(decimal_trailing_txt)decimal_trailing_txt.FieldSchema(name:id, type:int, comment:null), ] +PREHOOK: query: SELECT * FROM DECIMAL_TRAILING ORDER BY id +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_trailing +#### A masked pattern was here #### +POSTHOOK: query: SELECT * FROM DECIMAL_TRAILING ORDER BY id +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_trailing +#### A masked pattern was here #### +0 0 0 +1 0 0 +2 NULL NULL +3 1.0000 1.00000000 +4 10.0000 10.00000000 +5 100.0000 100.00000000 +6 1000.0000 1000.00000000 +7 
10000.0000 10000.00000000 +8 100000.0000 100000.00000000 +9 NULL 1000000.00000000 +10 NULL NULL +11 NULL NULL +12 NULL NULL +13 NULL NULL +14 NULL NULL +15 NULL NULL +16 NULL NULL +17 NULL NULL +18 1.0000 1.00000000 +19 10.000 10.0000000 +20 100.00 100.000000 +21 1000.0 1000.00000 +22 100000 10000.0000 +23 0.0000 0.00000000 +24 0.000 0.0000000 +25 0.00 0.000000 +26 0.0 0.00000 +27 0 0.00000 +28 12313.2000 134134.31252500 +29 99999.9990 134134.31242553 +PREHOOK: query: DROP TABLE DECIMAL_TRAILING_txt +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@decimal_trailing_txt +PREHOOK: Output: default@decimal_trailing_txt +POSTHOOK: query: DROP TABLE DECIMAL_TRAILING_txt +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@decimal_trailing_txt +POSTHOOK: Output: default@decimal_trailing_txt +PREHOOK: query: DROP TABLE DECIMAL_TRAILING +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@decimal_trailing +PREHOOK: Output: default@decimal_trailing +POSTHOOK: query: DROP TABLE DECIMAL_TRAILING +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@decimal_trailing +POSTHOOK: Output: default@decimal_trailing Index: ql/src/test/results/clientpositive/tez/vector_decimal_udf.q.out =================================================================== --- ql/src/test/results/clientpositive/tez/vector_decimal_udf.q.out (revision 0) +++ ql/src/test/results/clientpositive/tez/vector_decimal_udf.q.out (working copy) @@ -0,0 +1,2769 @@ +PREHOOK: query: DROP TABLE IF EXISTS DECIMAL_UDF_txt +PREHOOK: type: DROPTABLE +POSTHOOK: query: DROP TABLE IF EXISTS DECIMAL_UDF_txt +POSTHOOK: type: DROPTABLE +PREHOOK: query: DROP TABLE IF EXISTS DECIMAL_UDF +PREHOOK: type: DROPTABLE +POSTHOOK: query: DROP TABLE IF EXISTS DECIMAL_UDF +POSTHOOK: type: DROPTABLE +PREHOOK: query: CREATE TABLE DECIMAL_UDF_txt (key decimal(20,10), value int) +ROW FORMAT DELIMITED + FIELDS TERMINATED BY ' ' +STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@DECIMAL_UDF_txt +POSTHOOK: query: CREATE TABLE DECIMAL_UDF_txt (key decimal(20,10), value int) +ROW FORMAT DELIMITED + FIELDS TERMINATED BY ' ' +STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@DECIMAL_UDF_txt +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv7.txt' INTO TABLE DECIMAL_UDF_txt +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@decimal_udf_txt +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv7.txt' INTO TABLE DECIMAL_UDF_txt +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@decimal_udf_txt +PREHOOK: query: CREATE TABLE DECIMAL_UDF (key decimal(20,10), value int) +STORED AS ORC +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@DECIMAL_UDF +POSTHOOK: query: CREATE TABLE DECIMAL_UDF (key decimal(20,10), value int) +STORED AS ORC +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@DECIMAL_UDF +PREHOOK: query: INSERT OVERWRITE TABLE DECIMAL_UDF SELECT * FROM DECIMAL_UDF_txt +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_udf_txt +PREHOOK: Output: default@decimal_udf +POSTHOOK: query: INSERT OVERWRITE TABLE DECIMAL_UDF SELECT * FROM DECIMAL_UDF_txt +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_udf_txt +POSTHOOK: Output: default@decimal_udf +POSTHOOK: Lineage: decimal_udf.key SIMPLE [(decimal_udf_txt)decimal_udf_txt.FieldSchema(name:key, type:decimal(20,10), comment:null), ] +POSTHOOK: 
Lineage: decimal_udf.value SIMPLE [(decimal_udf_txt)decimal_udf_txt.FieldSchema(name:value, type:int, comment:null), ] +PREHOOK: query: -- addition +EXPLAIN SELECT key + key FROM DECIMAL_UDF +PREHOOK: type: QUERY +POSTHOOK: query: -- addition +EXPLAIN SELECT key + key FROM DECIMAL_UDF +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: decimal_udf + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: (key + key) (type: decimal(21,10)) + outputColumnNames: _col0 + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT key + key FROM DECIMAL_UDF +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +POSTHOOK: query: SELECT key + key FROM DECIMAL_UDF +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +-8800 +NULL +0.0000000000 +0 +200 +20 +2 +0.2 +0.02 +400 +40 +4 +0 +0.4 +0.04 +0.6 +0.66 +0.666 +-0.6 +-0.66 +-0.666 +2.0 +4 +6.28 +-2.24 +-2.24 +-2.244 +2.24 +2.244 +248.00 +250.4 +-2510.98 +6.28 +6.28 +6.280 +2.0000000000 +-2469135780.2469135780 +2469135780.2469135600 +PREHOOK: query: EXPLAIN SELECT key + value FROM DECIMAL_UDF +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN SELECT key + value FROM DECIMAL_UDF +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: decimal_udf + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: (key + value) (type: decimal(21,10)) + outputColumnNames: _col0 + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT key + value FROM DECIMAL_UDF +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +POSTHOOK: query: SELECT key + value FROM DECIMAL_UDF +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +0 +NULL +0.0000000000 +0 +200 +20 +2 +0.1 +0.01 +400 +40 +4 +0 +0.2 +0.02 +0.3 +0.33 +0.333 +-0.3 +-0.33 +-0.333 +2.0 +4 +6.14 +-2.12 +-2.12 +-12.122 +2.12 +2.122 +248.00 +250.2 +-2510.49 +6.14 +6.14 +7.140 +2.0000000000 +-2469135780.1234567890 +2469135780.1234567800 +PREHOOK: query: EXPLAIN SELECT key + (value/2) FROM DECIMAL_UDF 
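The addition plans above reflect Hive's SQL-standard-style type derivation for decimal arithmetic: for decimal(p1,s1) + decimal(p2,s2) the result scale is max(s1,s2) and the precision adds one digit of carry headroom, capped at 38 — which is why key + key over decimal(20,10) is planned as decimal(21,10), key * key later in this file needs decimal(38,20), and any double operand (value/2, or a string literal such as '1.0') demotes the whole expression to double. A rough sketch of the addition rule, with an illustrative helper name (decimalAddType is not a Hive API):

    // Mirrors the derivation visible in the plans; assumption-level sketch, not Hive code.
    static int[] decimalAddType(int p1, int s1, int p2, int s2) {
        int scale = Math.max(s1, s2);                    // keep the finer fractional scale
        int intDigits = Math.max(p1 - s1, p2 - s2) + 1;  // +1 integer digit for the carry
        return new int[] { Math.min(intDigits + scale, 38), scale };
    }
    // decimalAddType(20, 10, 20, 10) -> {21, 10}, matching "(key + key) (type: decimal(21,10))".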
+PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN SELECT key + (value/2) FROM DECIMAL_UDF +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: decimal_udf + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: (key + (value / 2)) (type: double) + outputColumnNames: _col0 + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT key + (value/2) FROM DECIMAL_UDF +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +POSTHOOK: query: SELECT key + (value/2) FROM DECIMAL_UDF +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +-2200.0 +NULL +0.0 +0.0 +150.0 +15.0 +1.5 +0.1 +0.01 +300.0 +30.0 +3.0 +0.0 +0.2 +0.02 +0.3 +0.33 +0.333 +-0.3 +-0.33 +-0.333 +1.5 +3.0 +4.640000000000001 +-1.62 +-1.62 +-6.622 +1.62 +1.622 +186.0 +187.7 +-1882.99 +4.640000000000001 +4.640000000000001 +5.140000000000001 +1.5 +-1.8518518351234567E9 +1.8518518351234567E9 +PREHOOK: query: EXPLAIN SELECT key + '1.0' FROM DECIMAL_UDF +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN SELECT key + '1.0' FROM DECIMAL_UDF +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: decimal_udf + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: (key + '1.0') (type: double) + outputColumnNames: _col0 + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT key + '1.0' FROM DECIMAL_UDF +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +POSTHOOK: query: SELECT key + '1.0' FROM DECIMAL_UDF +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +-4399.0 +NULL +1.0 +1.0 +101.0 +11.0 +2.0 +1.1 +1.01 +201.0 +21.0 +3.0 +1.0 +1.2 +1.02 +1.3 +1.33 +1.333 +0.7 +0.6699999999999999 +0.667 +2.0 +3.0 +4.140000000000001 +-0.1200000000000001 +-0.1200000000000001 +-0.12200000000000011 +2.12 +2.122 +125.0 +126.2 +-1254.49 +4.140000000000001 +4.140000000000001 +4.140000000000001 +2.0 +-1.2345678891234567E9 +1.2345678911234567E9 +PREHOOK: query: -- subtraction +EXPLAIN SELECT key - key FROM DECIMAL_UDF +PREHOOK: type: QUERY +POSTHOOK: query: -- subtraction
+EXPLAIN SELECT key - key FROM DECIMAL_UDF +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: decimal_udf + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: (key - key) (type: decimal(21,10)) + outputColumnNames: _col0 + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT key - key FROM DECIMAL_UDF +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +POSTHOOK: query: SELECT key - key FROM DECIMAL_UDF +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +0 +NULL +0.0000000000 +0 +0 +0 +0 +0.0 +0.00 +0 +0 +0 +0 +0.0 +0.00 +0.0 +0.00 +0.000 +0.0 +0.00 +0.000 +0.0 +0 +0.00 +0.00 +0.00 +0.000 +0.00 +0.000 +0.00 +0.0 +0.00 +0.00 +0.00 +0.000 +0.0000000000 +0.0000000000 +0.0000000000 +PREHOOK: query: EXPLAIN SELECT key - value FROM DECIMAL_UDF +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN SELECT key - value FROM DECIMAL_UDF +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: decimal_udf + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: (key - value) (type: decimal(21,10)) + outputColumnNames: _col0 + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT key - value FROM DECIMAL_UDF +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +POSTHOOK: query: SELECT key - value FROM DECIMAL_UDF +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +-8800 +NULL +0.0000000000 +0 +0 +0 +0 +0.1 +0.01 +0 +0 +0 +0 +0.2 +0.02 +0.3 +0.33 +0.333 +-0.3 +-0.33 +-0.333 +0.0 +0 +0.14 +-0.12 +-0.12 +9.878 +0.12 +0.122 +0.00 +0.2 +-0.49 +0.14 +0.14 +-0.860 +0.0000000000 +-0.1234567890 +0.1234567800 +PREHOOK: query: EXPLAIN SELECT key - (value/2) FROM DECIMAL_UDF +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN SELECT key - (value/2) FROM DECIMAL_UDF +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + 
TableScan + alias: decimal_udf + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: (key - (value / 2)) (type: double) + outputColumnNames: _col0 + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT key - (value/2) FROM DECIMAL_UDF +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +POSTHOOK: query: SELECT key - (value/2) FROM DECIMAL_UDF +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +-6600.0 +NULL +0.0 +0.0 +50.0 +5.0 +0.5 +0.1 +0.01 +100.0 +10.0 +1.0 +0.0 +0.2 +0.02 +0.3 +0.33 +0.333 +-0.3 +-0.33 +-0.333 +0.5 +1.0 +1.6400000000000001 +-0.6200000000000001 +-0.6200000000000001 +4.378 +0.6200000000000001 +0.6220000000000001 +62.0 +62.7 +-627.99 +1.6400000000000001 +1.6400000000000001 +1.1400000000000001 +0.5 +-6.172839451234567E8 +6.172839451234567E8 +PREHOOK: query: EXPLAIN SELECT key - '1.0' FROM DECIMAL_UDF +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN SELECT key - '1.0' FROM DECIMAL_UDF +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: decimal_udf + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: (key - '1.0') (type: double) + outputColumnNames: _col0 + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT key - '1.0' FROM DECIMAL_UDF +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +POSTHOOK: query: SELECT key - '1.0' FROM DECIMAL_UDF +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +-4401.0 +NULL +-1.0 +-1.0 +99.0 +9.0 +0.0 +-0.9 +-0.99 +199.0 +19.0 +1.0 +-1.0 +-0.8 +-0.98 +-0.7 +-0.6699999999999999 +-0.667 +-1.3 +-1.33 +-1.333 +0.0 +1.0 +2.14 +-2.12 +-2.12 +-2.122 +0.1200000000000001 +0.12200000000000011 +123.0 +124.2 +-1256.49 +2.14 +2.14 +2.14 +0.0 +-1.2345678911234567E9 +1.2345678891234567E9 +PREHOOK: query: -- multiplication +EXPLAIN SELECT key * key FROM DECIMAL_UDF +PREHOOK: type: QUERY +POSTHOOK: query: -- multiplication +EXPLAIN SELECT key * key FROM DECIMAL_UDF +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: decimal_udf + Statistics: Num 
rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: (key * key) (type: decimal(38,20)) + outputColumnNames: _col0 + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT key * key FROM DECIMAL_UDF +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +POSTHOOK: query: SELECT key * key FROM DECIMAL_UDF +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +19360000 +NULL +0.00000000000000000000 +0 +10000 +100 +1 +0.01 +0.0001 +40000 +400 +4 +0 +0.04 +0.0004 +0.09 +0.1089 +0.110889 +0.09 +0.1089 +0.110889 +1.00 +4 +9.8596 +1.2544 +1.2544 +1.258884 +1.2544 +1.258884 +15376.0000 +15675.04 +1576255.1401 +9.8596 +9.8596 +9.859600 +1.00000000000000000000 +NULL +NULL +PREHOOK: query: EXPLAIN SELECT key, value FROM DECIMAL_UDF where key * value > 0 +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN SELECT key, value FROM DECIMAL_UDF where key * value > 0 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: decimal_udf + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: ((key * value) > 0) (type: boolean) + Statistics: Num rows: 12 Data size: 1356 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: key (type: decimal(20,10)), value (type: int) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 12 Data size: 1356 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 12 Data size: 1356 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT key, value FROM DECIMAL_UDF where key * value > 0 +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +POSTHOOK: query: SELECT key, value FROM DECIMAL_UDF where key * value > 0 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +100 100 +10 10 +1 1 +200 200 +20 20 +2 2 +1.0 1 +2 2 +3.14 3 +-1.12 -1 +-1.12 -1 +-1.122 -11 +1.12 1 +1.122 1 +124.00 124 +125.2 125 +-1255.49 -1255 +3.14 3 +3.14 3 +3.140 4 +1.0000000000 1 +-1234567890.1234567890 -1234567890 +1234567890.1234567800 1234567890 +PREHOOK: query: EXPLAIN SELECT key * value FROM DECIMAL_UDF +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN SELECT key * value FROM DECIMAL_UDF +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez +#### A masked pattern was here #### + Vertices: + Map 1 
+ Map Operator Tree: + TableScan + alias: decimal_udf + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: (key * value) (type: decimal(31,10)) + outputColumnNames: _col0 + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT key * value FROM DECIMAL_UDF +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +POSTHOOK: query: SELECT key * value FROM DECIMAL_UDF +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +-19360000 +NULL +0.0000000000 +0 +10000 +100 +1 +0.0 +0.00 +40000 +400 +4 +0 +0.0 +0.00 +0.0 +0.00 +0.000 +0.0 +0.00 +0.000 +1.0 +4 +9.42 +1.12 +1.12 +12.342 +1.12 +1.122 +15376.00 +15650.0 +1575639.95 +9.42 +9.42 +12.560 +1.0000000000 +1524157875171467887.5019052100 +1524157875171467876.3907942000 +PREHOOK: query: EXPLAIN SELECT key * (value/2) FROM DECIMAL_UDF +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN SELECT key * (value/2) FROM DECIMAL_UDF +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: decimal_udf + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: (key * (value / 2)) (type: double) + outputColumnNames: _col0 + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT key * (value/2) FROM DECIMAL_UDF +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +POSTHOOK: query: SELECT key * (value/2) FROM DECIMAL_UDF +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +-9680000.0 +NULL +0.0 +0.0 +5000.0 +50.0 +0.5 +0.0 +0.0 +20000.0 +200.0 +2.0 +0.0 +0.0 +0.0 +0.0 +0.0 +0.0 +-0.0 +-0.0 +-0.0 +0.5 +2.0 +4.71 +0.56 +0.56 +6.171 +0.56 +0.561 +7688.0 +7825.0 +787819.975 +4.71 +4.71 +6.28 +0.5 +7.6207893758573389E17 +7.6207893758573389E17 +PREHOOK: query: EXPLAIN SELECT key * '2.0' FROM DECIMAL_UDF +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN SELECT key * '2.0' FROM DECIMAL_UDF +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: decimal_udf + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + Select Operator + 
expressions: (key * '2.0') (type: double) + outputColumnNames: _col0 + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT key * '2.0' FROM DECIMAL_UDF +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +POSTHOOK: query: SELECT key * '2.0' FROM DECIMAL_UDF +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +-8800.0 +NULL +0.0 +0.0 +200.0 +20.0 +2.0 +0.2 +0.02 +400.0 +40.0 +4.0 +0.0 +0.4 +0.04 +0.6 +0.66 +0.666 +-0.6 +-0.66 +-0.666 +2.0 +4.0 +6.28 +-2.24 +-2.24 +-2.244 +2.24 +2.244 +248.0 +250.4 +-2510.98 +6.28 +6.28 +6.28 +2.0 +-2.4691357802469134E9 +2.4691357802469134E9 +PREHOOK: query: -- division +EXPLAIN SELECT key / 0 FROM DECIMAL_UDF limit 1 +PREHOOK: type: QUERY +POSTHOOK: query: -- division +EXPLAIN SELECT key / 0 FROM DECIMAL_UDF limit 1 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: decimal_udf + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: (key / 0) (type: decimal(22,12)) + outputColumnNames: _col0 + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + Limit + Number of rows: 1 + Statistics: Num rows: 1 Data size: 113 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 113 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: 1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT key / 0 FROM DECIMAL_UDF limit 1 +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +POSTHOOK: query: SELECT key / 0 FROM DECIMAL_UDF limit 1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +NULL +PREHOOK: query: EXPLAIN SELECT key / NULL FROM DECIMAL_UDF limit 1 +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN SELECT key / NULL FROM DECIMAL_UDF limit 1 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: decimal_udf + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: (key / null) (type: double) + outputColumnNames: _col0 + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + Limit + Number of rows: 1 + Statistics: Num rows: 1 Data size: 113 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 113 
Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: 1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT key / NULL FROM DECIMAL_UDF limit 1 +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +POSTHOOK: query: SELECT key / NULL FROM DECIMAL_UDF limit 1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +NULL +PREHOOK: query: EXPLAIN SELECT key / key FROM DECIMAL_UDF WHERE key is not null and key <> 0 +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN SELECT key / key FROM DECIMAL_UDF WHERE key is not null and key <> 0 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: decimal_udf + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (key <> 0) (type: boolean) + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: (key / key) (type: decimal(38,24)) + outputColumnNames: _col0 + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT key / key FROM DECIMAL_UDF WHERE key is not null and key <> 0 +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +POSTHOOK: query: SELECT key / key FROM DECIMAL_UDF WHERE key is not null and key <> 0 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +PREHOOK: query: EXPLAIN SELECT key / value FROM DECIMAL_UDF WHERE value is not null and value <> 0 +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN SELECT key / value FROM DECIMAL_UDF WHERE value is not null and value <> 0 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: decimal_udf + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (value <> 0) (type: boolean) + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: (key / value) (type: decimal(31,21)) + outputColumnNames: _col0 + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT key / value FROM DECIMAL_UDF WHERE value is not null and value <> 0 +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +POSTHOOK: query: SELECT key / value FROM DECIMAL_UDF WHERE value is not null and value <> 0 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +-1 +1 +1 +1 +1 +1 +1 +1 +1 +1.046666666666666666667 +1.12 +1.12 +0.102 +1.12 +1.122 +1 +1.0016 +1.000390438247011952191 +1.046666666666666666667 +1.046666666666666666667 +0.785 +1 +1.0000000001 +1.000000000099999992710 +PREHOOK: query: EXPLAIN SELECT key / (value/2) FROM DECIMAL_UDF WHERE value is not null and value <> 0 +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN SELECT key / (value/2) FROM DECIMAL_UDF WHERE value is not null and value <> 0 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: decimal_udf + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (value <> 0) (type: boolean) + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: (key / (value / 2)) (type: double) + outputColumnNames: _col0 + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT key / (value/2) FROM DECIMAL_UDF WHERE value is not null and value <> 0 +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +POSTHOOK: query: SELECT key / (value/2) FROM DECIMAL_UDF WHERE value is not null and value <> 0 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +-2.0 +2.0 +2.0 +2.0 +2.0 +2.0 +2.0 +2.0 +2.0 +2.0933333333333333 +2.24 +2.24 +0.20400000000000001 +2.24 +2.244 +2.0 +2.0032 +2.000780876494024 +2.0933333333333333 +2.0933333333333333 +1.57 +2.0 +2.0000000002 +2.0000000002 +PREHOOK: query: EXPLAIN SELECT 1 + (key / '2.0') FROM DECIMAL_UDF +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN SELECT 1 + (key / '2.0') FROM DECIMAL_UDF +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: decimal_udf + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: (1 + (key / '2.0')) (type: double) + outputColumnNames: _col0 + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE 
Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT 1 + (key / '2.0') FROM DECIMAL_UDF +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +POSTHOOK: query: SELECT 1 + (key / '2.0') FROM DECIMAL_UDF +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +-2199.0 +NULL +1.0 +1.0 +51.0 +6.0 +1.5 +1.05 +1.005 +101.0 +11.0 +2.0 +1.0 +1.1 +1.01 +1.15 +1.165 +1.1665 +0.85 +0.835 +0.8335 +1.5 +2.0 +2.5700000000000003 +0.43999999999999995 +0.43999999999999995 +0.43899999999999995 +1.56 +1.561 +63.0 +63.6 +-626.745 +2.5700000000000003 +2.5700000000000003 +2.5700000000000003 +1.5 +-6.172839440617284E8 +6.172839460617284E8 +PREHOOK: query: -- abs +EXPLAIN SELECT abs(key) FROM DECIMAL_UDF +PREHOOK: type: QUERY +POSTHOOK: query: -- abs +EXPLAIN SELECT abs(key) FROM DECIMAL_UDF +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: decimal_udf + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: abs(key) (type: decimal(38,18)) + outputColumnNames: _col0 + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT abs(key) FROM DECIMAL_UDF +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +POSTHOOK: query: SELECT abs(key) FROM DECIMAL_UDF +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +4400 +NULL +0.0000000000 +0 +100 +10 +1 +0.1 +0.01 +200 +20 +2 +0 +0.2 +0.02 +0.3 +0.33 +0.333 +0.3 +0.33 +0.333 +1.0 +2 +3.14 +1.12 +1.12 +1.122 +1.12 +1.122 +124.00 +125.2 +1255.49 +3.14 +3.14 +3.140 +1.0000000000 +1234567890.1234567890 +1234567890.1234567800 +PREHOOK: query: -- avg +EXPLAIN SELECT value, sum(key) / count(key), avg(key), sum(key) FROM DECIMAL_UDF GROUP BY value ORDER BY value +PREHOOK: type: QUERY +POSTHOOK: query: -- avg +EXPLAIN SELECT value, sum(key) / count(key), avg(key), sum(key) FROM DECIMAL_UDF GROUP BY value ORDER BY value +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE) + Reducer 3 <- Reducer 2 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: decimal_udf + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: value (type: int), key (type: decimal(20,10)) + outputColumnNames: value, key + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: 
NONE + Group By Operator + aggregations: sum(key), count(key), avg(key) + keys: value (type: int) + mode: hash + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: int) + sort order: + + Map-reduce partition columns: _col0 (type: int) + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + value expressions: _col1 (type: decimal(30,10)), _col2 (type: bigint), _col3 (type: struct) + Execution mode: vectorized + Reducer 2 + Reduce Operator Tree: + Group By Operator + aggregations: sum(VALUE._col0), count(VALUE._col1), avg(VALUE._col2) + keys: KEY._col0 (type: int) + mode: mergepartial + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 19 Data size: 2148 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: int), (_col1 / _col2) (type: decimal(38,23)), _col3 (type: decimal(24,14)), _col1 (type: decimal(30,10)) + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 19 Data size: 2148 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: int) + sort order: + + Statistics: Num rows: 19 Data size: 2148 Basic stats: COMPLETE Column stats: NONE + value expressions: _col1 (type: decimal(38,23)), _col2 (type: decimal(24,14)), _col3 (type: decimal(30,10)) + Reducer 3 + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: int), VALUE._col0 (type: decimal(38,23)), VALUE._col1 (type: decimal(24,14)), VALUE._col2 (type: decimal(30,10)) + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 19 Data size: 2148 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 19 Data size: 2148 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT value, sum(key) / count(key), avg(key), sum(key) FROM DECIMAL_UDF GROUP BY value ORDER BY value +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +POSTHOOK: query: SELECT value, sum(key) / count(key), avg(key), sum(key) FROM DECIMAL_UDF GROUP BY value ORDER BY value +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +-1234567890 -1234567890.123456789 -1234567890.123456789 -1234567890.1234567890 +-1255 -1255.49 -1255.49 -1255.49 +-11 -1.122 -1.122 -1.122 +-1 -1.12 -1.12 -2.24 +0 0.02538461538461538461538 0.02538461538462 0.3300000000 +1 1.0484 1.0484 5.2420000000 +2 2 2 4 +3 3.14 3.14 9.42 +4 3.14 3.14 3.140 +10 10 10 10 +20 20 20 20 +100 100 100 100 +124 124 124 124.00 +125 125.2 125.2 125.2 +200 200 200 200 +4400 -4400 -4400 -4400 +1234567890 1234567890.12345678 1234567890.12345678 1234567890.1234567800 +PREHOOK: query: -- negative +EXPLAIN SELECT -key FROM DECIMAL_UDF +PREHOOK: type: QUERY +POSTHOOK: query: -- negative +EXPLAIN SELECT -key FROM DECIMAL_UDF +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: 
decimal_udf + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: (- key) (type: decimal(20,10)) + outputColumnNames: _col0 + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT -key FROM DECIMAL_UDF +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +POSTHOOK: query: SELECT -key FROM DECIMAL_UDF +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +4400 +NULL +0.0000000000 +0 +-100 +-10 +-1 +-0.1 +-0.01 +-200 +-20 +-2 +0 +-0.2 +-0.02 +-0.3 +-0.33 +-0.333 +0.3 +0.33 +0.333 +-1.0 +-2 +-3.14 +1.12 +1.12 +1.122 +-1.12 +-1.122 +-124.00 +-125.2 +1255.49 +-3.14 +-3.14 +-3.140 +-1.0000000000 +1234567890.1234567890 +-1234567890.1234567800 +PREHOOK: query: -- positive +EXPLAIN SELECT +key FROM DECIMAL_UDF +PREHOOK: type: QUERY +POSTHOOK: query: -- positive +EXPLAIN SELECT +key FROM DECIMAL_UDF +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + TableScan + alias: decimal_udf + Select Operator + expressions: key (type: decimal(20,10)) + outputColumnNames: _col0 + ListSink + +PREHOOK: query: SELECT +key FROM DECIMAL_UDF +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +POSTHOOK: query: SELECT +key FROM DECIMAL_UDF +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +-4400 +NULL +0.0000000000 +0 +100 +10 +1 +0.1 +0.01 +200 +20 +2 +0 +0.2 +0.02 +0.3 +0.33 +0.333 +-0.3 +-0.33 +-0.333 +1.0 +2 +3.14 +-1.12 +-1.12 +-1.122 +1.12 +1.122 +124.00 +125.2 +-1255.49 +3.14 +3.14 +3.140 +1.0000000000 +-1234567890.1234567890 +1234567890.1234567800 +PREHOOK: query: -- ceiling +EXPLAIN SELECT CEIL(key) FROM DECIMAL_UDF +PREHOOK: type: QUERY +POSTHOOK: query: -- ceiling +EXPLAIN SELECT CEIL(key) FROM DECIMAL_UDF +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: decimal_udf + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: ceil(key) (type: decimal(11,0)) + outputColumnNames: _col0 + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT CEIL(key) FROM DECIMAL_UDF +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_udf +#### A masked pattern was here ####
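As the ceiling plan above shows, ceil on decimal(20,10) yields decimal(11,0): the ten fractional digits are dropped and one integer digit of headroom is added for the carry case (ceil(99.9) = 100 needs it); floor, shown next, gets the same type. On negative inputs the two diverge — in the expected rows, -1255.49 ceils to -1255 and floors to -1256 — matching the CEILING and FLOOR modes of java.math.BigDecimal (sketch with a made-up class name, not Hive code):

    import java.math.BigDecimal;
    import java.math.RoundingMode;

    public class CeilFloorSketch {
        public static void main(String[] args) {
            BigDecimal key = new BigDecimal("-1255.49");
            System.out.println(key.setScale(0, RoundingMode.CEILING)); // -1255 (toward +infinity)
            System.out.println(key.setScale(0, RoundingMode.FLOOR));   // -1256 (toward -infinity)
        }
    }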
+POSTHOOK: query: SELECT CEIL(key) FROM DECIMAL_UDF +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +-4400 +NULL +0 +0 +100 +10 +1 +1 +1 +200 +20 +2 +0 +1 +1 +1 +1 +1 +0 +0 +0 +1 +2 +4 +-1 +-1 +-1 +2 +2 +124 +126 +-1255 +4 +4 +4 +1 +-1234567890 +1234567891 +PREHOOK: query: -- floor +EXPLAIN SELECT FLOOR(key) FROM DECIMAL_UDF +PREHOOK: type: QUERY +POSTHOOK: query: -- floor +EXPLAIN SELECT FLOOR(key) FROM DECIMAL_UDF +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: decimal_udf + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: floor(key) (type: decimal(11,0)) + outputColumnNames: _col0 + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT FLOOR(key) FROM DECIMAL_UDF +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +POSTHOOK: query: SELECT FLOOR(key) FROM DECIMAL_UDF +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +-4400 +NULL +0 +0 +100 +10 +1 +0 +0 +200 +20 +2 +0 +0 +0 +0 +0 +0 +-1 +-1 +-1 +1 +2 +3 +-2 +-2 +-2 +1 +1 +124 +125 +-1256 +3 +3 +3 +1 +-1234567891 +1234567890 +PREHOOK: query: -- round +EXPLAIN SELECT ROUND(key, 2) FROM DECIMAL_UDF +PREHOOK: type: QUERY +POSTHOOK: query: -- round +EXPLAIN SELECT ROUND(key, 2) FROM DECIMAL_UDF +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: decimal_udf + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: round(key, 2) (type: decimal(13,2)) + outputColumnNames: _col0 + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT ROUND(key, 2) FROM DECIMAL_UDF +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +POSTHOOK: query: SELECT ROUND(key, 2) FROM DECIMAL_UDF +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +-4400.00 +NULL +0.00 +0.00 +100.00 +10.00 +1.00 +0.10 +0.01 +200.00 +20.00 +2.00 +0.00 +0.20 +0.02 +0.30 +0.33 +0.33 +-0.30 +-0.33 +-0.33 +1.00 +2.00 +3.14 +-1.12 +-1.12 +-1.12 +1.12 +1.12 +124.00 +125.20 +-1255.49 +3.14 +3.14 +3.14 
+1.00 +-1234567890.12 +1234567890.12 +PREHOOK: query: -- power +EXPLAIN SELECT POWER(key, 2) FROM DECIMAL_UDF +PREHOOK: type: QUERY +POSTHOOK: query: -- power +EXPLAIN SELECT POWER(key, 2) FROM DECIMAL_UDF +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: decimal_udf + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: power(key, 2) (type: double) + outputColumnNames: _col0 + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT POWER(key, 2) FROM DECIMAL_UDF +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +POSTHOOK: query: SELECT POWER(key, 2) FROM DECIMAL_UDF +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +1.936E7 +NULL +0.0 +0.0 +10000.0 +100.0 +1.0 +0.010000000000000002 +1.0E-4 +40000.0 +400.0 +4.0 +0.0 +0.04000000000000001 +4.0E-4 +0.09 +0.10890000000000001 +0.11088900000000002 +0.09 +0.10890000000000001 +0.11088900000000002 +1.0 +4.0 +9.8596 +1.2544000000000002 +1.2544000000000002 +1.2588840000000003 +1.2544000000000002 +1.2588840000000003 +15376.0 +15675.04 +1576255.1401 +9.8596 +9.8596 +9.8596 +1.0 +1.52415787532388352E18 +1.52415787532388352E18 +PREHOOK: query: -- modulo +EXPLAIN SELECT (key + 1) % (key / 2) FROM DECIMAL_UDF +PREHOOK: type: QUERY +POSTHOOK: query: -- modulo +EXPLAIN SELECT (key + 1) % (key / 2) FROM DECIMAL_UDF +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: decimal_udf + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: ((key + 1) % (key / 2)) (type: decimal(22,12)) + outputColumnNames: _col0 + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT (key + 1) % (key / 2) FROM DECIMAL_UDF +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +POSTHOOK: query: SELECT (key + 1) % (key / 2) FROM DECIMAL_UDF +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +-2199 +NULL +NULL +NULL +1 +1 +0.0 +0.00 +0.000 +1 +1 +0 +NULL +0.0 +0.00 +0.10 +0.010 +0.0010 +0.10 +0.010 +0.0010 +0.0 +0 +1.00 +-0.12 +-0.12 +-0.122 +0.44 +0.439 +1.00 +1.0 +-626.745 
+1.00 +1.00 +1.000 +0.0000000000 +-617283944.0617283945 +1.0000000000 +PREHOOK: query: -- stddev, var +EXPLAIN SELECT value, stddev(key), variance(key) FROM DECIMAL_UDF GROUP BY value +PREHOOK: type: QUERY +POSTHOOK: query: -- stddev, var +EXPLAIN SELECT value, stddev(key), variance(key) FROM DECIMAL_UDF GROUP BY value +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: decimal_udf + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: value (type: int), key (type: decimal(20,10)) + outputColumnNames: value, key + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: stddev(key), variance(key) + keys: value (type: int) + mode: hash + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: int) + sort order: + + Map-reduce partition columns: _col0 (type: int) + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + value expressions: _col1 (type: struct), _col2 (type: struct) + Execution mode: vectorized + Reducer 2 + Reduce Operator Tree: + Group By Operator + aggregations: stddev(VALUE._col0), variance(VALUE._col1) + keys: KEY._col0 (type: int) + mode: mergepartial + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 19 Data size: 2148 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: int), _col1 (type: double), _col2 (type: double) + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 19 Data size: 2148 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 19 Data size: 2148 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT value, stddev(key), variance(key) FROM DECIMAL_UDF GROUP BY value +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +POSTHOOK: query: SELECT value, stddev(key), variance(key) FROM DECIMAL_UDF GROUP BY value +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +-1234567890 0.0 0.0 +-1255 0.0 0.0 +-11 0.0 0.0 +-1 0.0 0.0 +0 0.22561046704494161 0.050900082840236685 +1 0.05928102563215321 0.0035142400000000066 +2 0.0 0.0 +3 0.0 0.0 +4 0.0 0.0 +10 0.0 0.0 +20 0.0 0.0 +100 0.0 0.0 +124 0.0 0.0 +125 0.0 0.0 +200 0.0 0.0 +4400 0.0 0.0 +1234567890 0.0 0.0 +PREHOOK: query: -- stddev_samp, var_samp +EXPLAIN SELECT value, stddev_samp(key), var_samp(key) FROM DECIMAL_UDF GROUP BY value +PREHOOK: type: QUERY +POSTHOOK: query: -- stddev_samp, var_samp +EXPLAIN SELECT value, stddev_samp(key), var_samp(key) FROM DECIMAL_UDF GROUP BY value +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map 
Operator Tree: + TableScan + alias: decimal_udf + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: value (type: int), key (type: decimal(20,10)) + outputColumnNames: value, key + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: stddev_samp(key), var_samp(key) + keys: value (type: int) + mode: hash + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: int) + sort order: + + Map-reduce partition columns: _col0 (type: int) + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + value expressions: _col1 (type: struct), _col2 (type: struct) + Execution mode: vectorized + Reducer 2 + Reduce Operator Tree: + Group By Operator + aggregations: stddev_samp(VALUE._col0), var_samp(VALUE._col1) + keys: KEY._col0 (type: int) + mode: mergepartial + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 19 Data size: 2148 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: int), _col1 (type: double), _col2 (type: double) + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 19 Data size: 2148 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 19 Data size: 2148 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT value, stddev_samp(key), var_samp(key) FROM DECIMAL_UDF GROUP BY value +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +POSTHOOK: query: SELECT value, stddev_samp(key), var_samp(key) FROM DECIMAL_UDF GROUP BY value +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +-1234567890 0.0 0.0 +-1255 0.0 0.0 +-11 0.0 0.0 +-1 0.0 0.0 +0 0.2348228191855647 0.055141756410256405 +1 0.06627820154470102 0.004392800000000008 +2 0.0 0.0 +3 0.0 0.0 +4 0.0 0.0 +10 0.0 0.0 +20 0.0 0.0 +100 0.0 0.0 +124 0.0 0.0 +125 0.0 0.0 +200 0.0 0.0 +4400 0.0 0.0 +1234567890 0.0 0.0 +PREHOOK: query: -- histogram +EXPLAIN SELECT histogram_numeric(key, 3) FROM DECIMAL_UDF +PREHOOK: type: QUERY +POSTHOOK: query: -- histogram +EXPLAIN SELECT histogram_numeric(key, 3) FROM DECIMAL_UDF +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: decimal_udf + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: key (type: decimal(20,10)) + outputColumnNames: key + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: histogram_numeric(key, 3) + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE + value expressions: _col0 (type: 
array) + Reducer 2 + Reduce Operator Tree: + Group By Operator + aggregations: histogram_numeric(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE + Select Operator + expressions: _col0 (type: array>) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT histogram_numeric(key, 3) FROM DECIMAL_UDF +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +POSTHOOK: query: SELECT histogram_numeric(key, 3) FROM DECIMAL_UDF +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +[{"x":-1.2345678901234567E9,"y":1.0},{"x":-144.50057142857142,"y":35.0},{"x":1.2345678901234567E9,"y":1.0}] +PREHOOK: query: -- min +EXPLAIN SELECT MIN(key) FROM DECIMAL_UDF +PREHOOK: type: QUERY +POSTHOOK: query: -- min +EXPLAIN SELECT MIN(key) FROM DECIMAL_UDF +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: decimal_udf + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: key (type: decimal(20,10)) + outputColumnNames: key + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: min(key) + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: decimal(20,10)) + Execution mode: vectorized + Reducer 2 + Reduce Operator Tree: + Group By Operator + aggregations: min(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: decimal(20,10)) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT MIN(key) FROM DECIMAL_UDF +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +POSTHOOK: query: SELECT MIN(key) FROM DECIMAL_UDF +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +-1234567890.1234567890 +PREHOOK: query: -- max +EXPLAIN SELECT MAX(key) FROM DECIMAL_UDF +PREHOOK: type: QUERY 
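
The single histogram_numeric row above comes from an approximate streaming algorithm: each value starts as an (x, y) bin of weight 1, and whenever more than the requested number of bins exist, the two centroids closest on the x axis are merged into their weighted mean. A sketch of that idea (an illustration only, not Hive's GenericUDAFHistogramNumeric source):

    import java.util.ArrayList;
    import java.util.List;

    public class HistogramSketch {
        static class Bin { double x, y; Bin(double x, double y) { this.x = x; this.y = y; } }

        // Insert one value into a sorted bin list, merging the closest pair when over budget.
        static void add(List<Bin> bins, double v, int maxBins) {
            int i = 0;
            while (i < bins.size() && bins.get(i).x < v) i++;
            bins.add(i, new Bin(v, 1.0));
            if (bins.size() <= maxBins) return;
            int best = 0;
            for (int j = 1; j < bins.size() - 1; j++)
                if (bins.get(j + 1).x - bins.get(j).x < bins.get(best + 1).x - bins.get(best).x)
                    best = j;
            Bin a = bins.get(best), b = bins.remove(best + 1);
            a.x = (a.x * a.y + b.x * b.y) / (a.y + b.y);  // weighted-mean merge
            a.y += b.y;
        }

        public static void main(String[] args) {
            List<Bin> bins = new ArrayList<>();
            for (double v : new double[]{-1255.49, 0.33, 3.14, 124, 1.2345678901234567E9})
                add(bins, v, 3);
            for (Bin b : bins) System.out.printf("{x=%s, y=%s}%n", b.x, b.y);
        }
    }

With 38 rows and 3 bins this shape is exactly what the golden output shows: the two extreme keys survive as their own bins (y = 1.0) and the remaining 35 values collapse into one centroid near -144.5.
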
+POSTHOOK: query: -- max +EXPLAIN SELECT MAX(key) FROM DECIMAL_UDF +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: decimal_udf + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: key (type: decimal(20,10)) + outputColumnNames: key + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: max(key) + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: decimal(20,10)) + Execution mode: vectorized + Reducer 2 + Reduce Operator Tree: + Group By Operator + aggregations: max(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: decimal(20,10)) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT MAX(key) FROM DECIMAL_UDF +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +POSTHOOK: query: SELECT MAX(key) FROM DECIMAL_UDF +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +1234567890.1234567800 +PREHOOK: query: -- count +EXPLAIN SELECT COUNT(key) FROM DECIMAL_UDF +PREHOOK: type: QUERY +POSTHOOK: query: -- count +EXPLAIN SELECT COUNT(key) FROM DECIMAL_UDF +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: decimal_udf + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: key (type: decimal(20,10)) + outputColumnNames: key + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count(key) + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Execution mode: vectorized + Reducer 2 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic 
stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT COUNT(key) FROM DECIMAL_UDF +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +POSTHOOK: query: SELECT COUNT(key) FROM DECIMAL_UDF +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +37 +PREHOOK: query: DROP TABLE IF EXISTS DECIMAL_UDF_txt +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@decimal_udf_txt +PREHOOK: Output: default@decimal_udf_txt +POSTHOOK: query: DROP TABLE IF EXISTS DECIMAL_UDF_txt +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@decimal_udf_txt +POSTHOOK: Output: default@decimal_udf_txt +PREHOOK: query: DROP TABLE IF EXISTS DECIMAL_UDF +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@decimal_udf +PREHOOK: Output: default@decimal_udf +POSTHOOK: query: DROP TABLE IF EXISTS DECIMAL_UDF +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@decimal_udf +POSTHOOK: Output: default@decimal_udf Index: ql/src/test/results/clientpositive/tez/vector_decimal_udf2.q.out =================================================================== --- ql/src/test/results/clientpositive/tez/vector_decimal_udf2.q.out (revision 0) +++ ql/src/test/results/clientpositive/tez/vector_decimal_udf2.q.out (working copy) @@ -0,0 +1,187 @@ +PREHOOK: query: DROP TABLE IF EXISTS DECIMAL_UDF2_txt +PREHOOK: type: DROPTABLE +POSTHOOK: query: DROP TABLE IF EXISTS DECIMAL_UDF2_txt +POSTHOOK: type: DROPTABLE +PREHOOK: query: DROP TABLE IF EXISTS DECIMAL_UDF2 +PREHOOK: type: DROPTABLE +POSTHOOK: query: DROP TABLE IF EXISTS DECIMAL_UDF2 +POSTHOOK: type: DROPTABLE +PREHOOK: query: CREATE TABLE DECIMAL_UDF2_txt (key decimal(20,10), value int) +ROW FORMAT DELIMITED + FIELDS TERMINATED BY ' ' +STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@DECIMAL_UDF2_txt +POSTHOOK: query: CREATE TABLE DECIMAL_UDF2_txt (key decimal(20,10), value int) +ROW FORMAT DELIMITED + FIELDS TERMINATED BY ' ' +STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@DECIMAL_UDF2_txt +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv7.txt' INTO TABLE DECIMAL_UDF2_txt +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@decimal_udf2_txt +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv7.txt' INTO TABLE DECIMAL_UDF2_txt +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@decimal_udf2_txt +PREHOOK: query: CREATE TABLE DECIMAL_UDF2 (key decimal(20,10), value int) +STORED AS ORC +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@DECIMAL_UDF2 +POSTHOOK: query: CREATE TABLE DECIMAL_UDF2 (key decimal(20,10), value int) +STORED AS ORC +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@DECIMAL_UDF2 +PREHOOK: query: INSERT OVERWRITE TABLE DECIMAL_UDF2 SELECT * FROM DECIMAL_UDF2_txt +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_udf2_txt +PREHOOK: Output: default@decimal_udf2 +POSTHOOK: query: 
INSERT OVERWRITE TABLE DECIMAL_UDF2 SELECT * FROM DECIMAL_UDF2_txt +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_udf2_txt +POSTHOOK: Output: default@decimal_udf2 +POSTHOOK: Lineage: decimal_udf2.key SIMPLE [(decimal_udf2_txt)decimal_udf2_txt.FieldSchema(name:key, type:decimal(20,10), comment:null), ] +POSTHOOK: Lineage: decimal_udf2.value SIMPLE [(decimal_udf2_txt)decimal_udf2_txt.FieldSchema(name:value, type:int, comment:null), ] +PREHOOK: query: EXPLAIN +SELECT acos(key), asin(key), atan(key), cos(key), sin(key), tan(key), radians(key) +FROM DECIMAL_UDF2 WHERE key = 10 +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN +SELECT acos(key), asin(key), atan(key), cos(key), sin(key), tan(key), radians(key) +FROM DECIMAL_UDF2 WHERE key = 10 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: decimal_udf2 + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (key = 10) (type: boolean) + Statistics: Num rows: 19 Data size: 2148 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: acos(key) (type: double), asin(key) (type: double), atan(key) (type: double), cos(key) (type: double), sin(key) (type: double), tan(key) (type: double), radians(key) (type: double) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6 + Statistics: Num rows: 19 Data size: 2148 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 19 Data size: 2148 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT acos(key), asin(key), atan(key), cos(key), sin(key), tan(key), radians(key) +FROM DECIMAL_UDF2 WHERE key = 10 +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_udf2 +#### A masked pattern was here #### +POSTHOOK: query: SELECT acos(key), asin(key), atan(key), cos(key), sin(key), tan(key), radians(key) +FROM DECIMAL_UDF2 WHERE key = 10 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_udf2 +#### A masked pattern was here #### +NaN NaN 1.4711276743037347 -0.8390715290764524 -0.5440211108893698 0.6483608274590866 0.17453292519943295 +PREHOOK: query: EXPLAIN +SELECT + exp(key), ln(key), + log(key), log(key, key), log(key, value), log(value, key), + log10(key), sqrt(key) +FROM DECIMAL_UDF2 WHERE key = 10 +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN +SELECT + exp(key), ln(key), + log(key), log(key, key), log(key, value), log(value, key), + log10(key), sqrt(key) +FROM DECIMAL_UDF2 WHERE key = 10 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: decimal_udf2 + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (key = 10) (type: boolean) + Statistics: Num rows: 19 Data size: 2148 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: exp(key) (type: double), ln(key) (type: double), 
log(key) (type: double), log(key, key) (type: double), log(key, value) (type: double), log(value, key) (type: double), log10(key) (type: double), sqrt(key) (type: double) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7 + Statistics: Num rows: 19 Data size: 2148 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 19 Data size: 2148 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT + exp(key), ln(key), + log(key), log(key, key), log(key, value), log(value, key), + log10(key), sqrt(key) +FROM DECIMAL_UDF2 WHERE key = 10 +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_udf2 +#### A masked pattern was here #### +POSTHOOK: query: SELECT + exp(key), ln(key), + log(key), log(key, key), log(key, value), log(value, key), + log10(key), sqrt(key) +FROM DECIMAL_UDF2 WHERE key = 10 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_udf2 +#### A masked pattern was here #### +22026.465794806718 2.302585092994046 2.302585092994046 1.0 1.0 1.0 1.0 3.1622776601683795 +PREHOOK: query: DROP TABLE IF EXISTS DECIMAL_UDF2_txt +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@decimal_udf2_txt +PREHOOK: Output: default@decimal_udf2_txt +POSTHOOK: query: DROP TABLE IF EXISTS DECIMAL_UDF2_txt +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@decimal_udf2_txt +POSTHOOK: Output: default@decimal_udf2_txt +PREHOOK: query: DROP TABLE IF EXISTS DECIMAL_UDF2 +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@decimal_udf2 +PREHOOK: Output: default@decimal_udf2 +POSTHOOK: query: DROP TABLE IF EXISTS DECIMAL_UDF2 +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@decimal_udf2 +POSTHOOK: Output: default@decimal_udf2 Index: ql/src/test/results/clientpositive/tez/vector_mapjoin_reduce.q.out =================================================================== --- ql/src/test/results/clientpositive/tez/vector_mapjoin_reduce.q.out (revision 1637277) +++ ql/src/test/results/clientpositive/tez/vector_mapjoin_reduce.q.out (working copy) @@ -1,104 +1,6 @@ PREHOOK: query: -- SORT_QUERY_RESULTS -DROP TABLE part -PREHOOK: type: DROPTABLE -POSTHOOK: query: -- SORT_QUERY_RESULTS - -DROP TABLE part -POSTHOOK: type: DROPTABLE -PREHOOK: query: -- data setup -CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@part -POSTHOOK: query: -- data setup -CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@part -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part -PREHOOK: type: LOAD -#### A masked pattern was here #### -PREHOOK: Output: default@part -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part -POSTHOOK: type: LOAD -#### A masked pattern was here #### -POSTHOOK: Output: default@part -PREHOOK: query: DROP TABLE lineitem 
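
The DECIMAL_UDF2 results above follow directly from java.lang.Math once key is widened from decimal(20,10) to double; a self-contained check whose printed values match the golden rows for key = 10:

    public class DecimalMathCheck {
        public static void main(String[] args) {
            double key = 10.0; // key = 10, widened from decimal(20,10) to double
            System.out.println(Math.acos(key));      // NaN: 10 lies outside [-1, 1]
            System.out.println(Math.atan(key));      // 1.4711276743037347
            System.out.println(Math.cos(key));       // -0.8390715290764524
            System.out.println(Math.toRadians(key)); // 0.17453292519943295
            System.out.println(Math.exp(key));       // 22026.465794806718
            System.out.println(Math.log(key));       // 2.302585092994046 (ln)
            System.out.println(Math.log10(key));     // 1.0
            System.out.println(Math.sqrt(key));      // 3.1622776601683795
            // log(key, key), log(key, value) and log(value, key) all reduce to
            // Math.log(a) / Math.log(b) with a = b = 10 in this row, hence 1.0.
        }
    }
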
-PREHOOK: type: DROPTABLE -POSTHOOK: query: DROP TABLE lineitem -POSTHOOK: type: DROPTABLE -PREHOOK: query: CREATE TABLE lineitem (L_ORDERKEY INT, - L_PARTKEY INT, - L_SUPPKEY INT, - L_LINENUMBER INT, - L_QUANTITY DOUBLE, - L_EXTENDEDPRICE DOUBLE, - L_DISCOUNT DOUBLE, - L_TAX DOUBLE, - L_RETURNFLAG STRING, - L_LINESTATUS STRING, - l_shipdate STRING, - L_COMMITDATE STRING, - L_RECEIPTDATE STRING, - L_SHIPINSTRUCT STRING, - L_SHIPMODE STRING, - L_COMMENT STRING) -ROW FORMAT DELIMITED -FIELDS TERMINATED BY '|' -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@lineitem -POSTHOOK: query: CREATE TABLE lineitem (L_ORDERKEY INT, - L_PARTKEY INT, - L_SUPPKEY INT, - L_LINENUMBER INT, - L_QUANTITY DOUBLE, - L_EXTENDEDPRICE DOUBLE, - L_DISCOUNT DOUBLE, - L_TAX DOUBLE, - L_RETURNFLAG STRING, - L_LINESTATUS STRING, - l_shipdate STRING, - L_COMMITDATE STRING, - L_RECEIPTDATE STRING, - L_SHIPINSTRUCT STRING, - L_SHIPMODE STRING, - L_COMMENT STRING) -ROW FORMAT DELIMITED -FIELDS TERMINATED BY '|' -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@lineitem -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/lineitem.txt' OVERWRITE INTO TABLE lineitem -PREHOOK: type: LOAD -#### A masked pattern was here #### -PREHOOK: Output: default@lineitem -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/lineitem.txt' OVERWRITE INTO TABLE lineitem -POSTHOOK: type: LOAD -#### A masked pattern was here #### -POSTHOOK: Output: default@lineitem -PREHOOK: query: -- Verify HIVE-8097 with a query that has a Vectorized MapJoin in the Reducer. +-- Verify HIVE-8097 with a query that has a Vectorized MapJoin in the Reducer. -- Query copied from subquery_in.q -- non agg, non corr, with join in Parent Query @@ -108,7 +10,9 @@ where li.l_linenumber = 1 and li.l_orderkey in (select l_orderkey from lineitem where l_shipmode = 'AIR') PREHOOK: type: QUERY -POSTHOOK: query: -- Verify HIVE-8097 with a query that has a Vectorized MapJoin in the Reducer. +POSTHOOK: query: -- SORT_QUERY_RESULTS + +-- Verify HIVE-8097 with a query that has a Vectorized MapJoin in the Reducer. 
-- Query copied from subquery_in.q -- non agg, non corr, with join in Parent Query @@ -133,71 +37,71 @@ Map Operator Tree: TableScan alias: li - Statistics: Num rows: 756 Data size: 12099 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 100 Data size: 11999 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: ((l_partkey is not null and l_orderkey is not null) and (l_linenumber = 1)) (type: boolean) - Statistics: Num rows: 94 Data size: 1504 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 12 Data size: 1439 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: l_partkey (type: int) sort order: + Map-reduce partition columns: l_partkey (type: int) - Statistics: Num rows: 94 Data size: 1504 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 12 Data size: 1439 Basic stats: COMPLETE Column stats: NONE value expressions: l_orderkey (type: int), l_suppkey (type: int) Map 2 Map Operator Tree: TableScan alias: lineitem - Statistics: Num rows: 3024 Data size: 12099 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 100 Data size: 11999 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: l_partkey is not null (type: boolean) - Statistics: Num rows: 1512 Data size: 6049 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 50 Data size: 5999 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: l_partkey (type: int) outputColumnNames: l_partkey - Statistics: Num rows: 1512 Data size: 6049 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 50 Data size: 5999 Basic stats: COMPLETE Column stats: NONE Group By Operator keys: l_partkey (type: int) mode: hash outputColumnNames: _col0 - Statistics: Num rows: 1512 Data size: 6049 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 50 Data size: 5999 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: int) sort order: + Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 1512 Data size: 6049 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 50 Data size: 5999 Basic stats: COMPLETE Column stats: NONE Map 4 Map Operator Tree: TableScan alias: lineitem - Statistics: Num rows: 1728 Data size: 12099 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 100 Data size: 11999 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: ((l_shipmode = 'AIR') and l_orderkey is not null) (type: boolean) - Statistics: Num rows: 432 Data size: 3024 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 25 Data size: 2999 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: l_orderkey (type: int) outputColumnNames: _col0 - Statistics: Num rows: 432 Data size: 3024 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 25 Data size: 2999 Basic stats: COMPLETE Column stats: NONE Group By Operator keys: _col0 (type: int) mode: hash outputColumnNames: _col0 - Statistics: Num rows: 432 Data size: 3024 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 25 Data size: 2999 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: int) sort order: + Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 432 Data size: 3024 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 25 Data size: 2999 Basic stats: COMPLETE Column stats: NONE Reducer 3 Reduce Operator Tree: Group By Operator keys: KEY._col0 (type: 
int) mode: mergepartial outputColumnNames: _col0 - Statistics: Num rows: 756 Data size: 3024 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 25 Data size: 2999 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: int) outputColumnNames: _col0 - Statistics: Num rows: 756 Data size: 3024 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 25 Data size: 2999 Basic stats: COMPLETE Column stats: NONE Map Join Operator condition map: Inner Join 0 to 1 @@ -210,7 +114,7 @@ outputColumnNames: _col0, _col1, _col3 input vertices: 1 Map 1 - Statistics: Num rows: 831 Data size: 3326 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 27 Data size: 3298 Basic stats: COMPLETE Column stats: NONE Map Join Operator condition map: Left Semi Join 0 to 1 @@ -223,14 +127,14 @@ outputColumnNames: _col0, _col3 input vertices: 1 Map 4 - Statistics: Num rows: 914 Data size: 3658 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 29 Data size: 3627 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: int), _col3 (type: int) outputColumnNames: _col0, _col1 - Statistics: Num rows: 914 Data size: 3658 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 29 Data size: 3627 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 914 Data size: 3658 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 29 Data size: 3627 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -296,71 +200,71 @@ Map Operator Tree: TableScan alias: li - Statistics: Num rows: 756 Data size: 12099 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 100 Data size: 11999 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: (((l_partkey is not null and l_orderkey is not null) and l_linenumber is not null) and (l_linenumber = 1)) (type: boolean) - Statistics: Num rows: 47 Data size: 752 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 6 Data size: 719 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: l_partkey (type: int) sort order: + Map-reduce partition columns: l_partkey (type: int) - Statistics: Num rows: 47 Data size: 752 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 6 Data size: 719 Basic stats: COMPLETE Column stats: NONE value expressions: l_orderkey (type: int), l_suppkey (type: int) Map 2 Map Operator Tree: TableScan alias: lineitem - Statistics: Num rows: 3024 Data size: 12099 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 100 Data size: 11999 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: l_partkey is not null (type: boolean) - Statistics: Num rows: 1512 Data size: 6049 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 50 Data size: 5999 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: l_partkey (type: int) outputColumnNames: l_partkey - Statistics: Num rows: 1512 Data size: 6049 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 50 Data size: 5999 Basic stats: COMPLETE Column stats: NONE Group By Operator keys: l_partkey (type: int) mode: hash outputColumnNames: _col0 - Statistics: Num rows: 1512 Data size: 6049 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 50 Data size: 5999 Basic stats: COMPLETE Column stats: NONE Reduce 
Output Operator key expressions: _col0 (type: int) sort order: + Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 1512 Data size: 6049 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 50 Data size: 5999 Basic stats: COMPLETE Column stats: NONE Map 4 Map Operator Tree: TableScan alias: lineitem - Statistics: Num rows: 1099 Data size: 12099 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 100 Data size: 11999 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (((l_shipmode = 'AIR') and l_orderkey is not null) and l_linenumber is not null) (type: boolean) - Statistics: Num rows: 138 Data size: 1519 Basic stats: COMPLETE Column stats: NONE + predicate: ((((l_shipmode = 'AIR') and l_orderkey is not null) and l_linenumber is not null) and (l_linenumber = 1)) (type: boolean) + Statistics: Num rows: 6 Data size: 719 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: l_orderkey (type: int), l_linenumber (type: int) + expressions: l_orderkey (type: int), 1 (type: int) outputColumnNames: _col0, _col1 - Statistics: Num rows: 138 Data size: 1519 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 6 Data size: 719 Basic stats: COMPLETE Column stats: NONE Group By Operator keys: _col0 (type: int), _col1 (type: int) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 138 Data size: 1519 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 6 Data size: 719 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: int), _col1 (type: int) sort order: ++ Map-reduce partition columns: _col0 (type: int), _col1 (type: int) - Statistics: Num rows: 138 Data size: 1519 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 6 Data size: 719 Basic stats: COMPLETE Column stats: NONE Reducer 3 Reduce Operator Tree: Group By Operator keys: KEY._col0 (type: int) mode: mergepartial outputColumnNames: _col0 - Statistics: Num rows: 756 Data size: 3024 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 25 Data size: 2999 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: int) outputColumnNames: _col0 - Statistics: Num rows: 756 Data size: 3024 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 25 Data size: 2999 Basic stats: COMPLETE Column stats: NONE Map Join Operator condition map: Inner Join 0 to 1 @@ -373,7 +277,7 @@ outputColumnNames: _col0, _col1, _col3 input vertices: 1 Map 1 - Statistics: Num rows: 831 Data size: 3326 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 27 Data size: 3298 Basic stats: COMPLETE Column stats: NONE Map Join Operator condition map: Left Semi Join 0 to 1 @@ -386,14 +290,14 @@ outputColumnNames: _col0, _col3 input vertices: 1 Map 4 - Statistics: Num rows: 914 Data size: 3658 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 29 Data size: 3627 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: int), _col3 (type: int) outputColumnNames: _col0, _col1 - Statistics: Num rows: 914 Data size: 3658 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 29 Data size: 3627 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 914 Data size: 3658 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 29 Data size: 3627 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat Index: ql/src/test/results/clientpositive/tez/vectorization_limit.q.out =================================================================== --- ql/src/test/results/clientpositive/tez/vectorization_limit.q.out (revision 0) +++ ql/src/test/results/clientpositive/tez/vectorization_limit.q.out (working copy) @@ -0,0 +1,565 @@ +WARNING: Comparing a bigint and a double may result in a loss of precision. +PREHOOK: query: explain SELECT cbigint, cdouble FROM alltypesorc WHERE cbigint < cdouble and cint > 0 limit 7 +PREHOOK: type: QUERY +POSTHOOK: query: explain SELECT cbigint, cdouble FROM alltypesorc WHERE cbigint < cdouble and cint > 0 limit 7 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: 7 + Processor Tree: + TableScan + alias: alltypesorc + Filter Operator + predicate: ((cbigint < cdouble) and (cint > 0)) (type: boolean) + Select Operator + expressions: cbigint (type: bigint), cdouble (type: double) + outputColumnNames: _col0, _col1 + Limit + Number of rows: 7 + ListSink + +WARNING: Comparing a bigint and a double may result in a loss of precision. +PREHOOK: query: SELECT cbigint, cdouble FROM alltypesorc WHERE cbigint < cdouble and cint > 0 limit 7 +PREHOOK: type: QUERY +PREHOOK: Input: default@alltypesorc +#### A masked pattern was here #### +POSTHOOK: query: SELECT cbigint, cdouble FROM alltypesorc WHERE cbigint < cdouble and cint > 0 limit 7 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@alltypesorc +#### A masked pattern was here #### +-1887561756 1839.0 +-1887561756 -10011.0 +-1887561756 -13877.0 +-1887561756 10361.0 +-1887561756 -8881.0 +-1887561756 -2281.0 +-1887561756 9531.0 +PREHOOK: query: -- HIVE-3562 Some limit can be pushed down to map stage - c/p parts from limit_pushdown + +explain +select ctinyint,cdouble,csmallint from alltypesorc where ctinyint is not null order by ctinyint,cdouble limit 20 +PREHOOK: type: QUERY +POSTHOOK: query: -- HIVE-3562 Some limit can be pushed down to map stage - c/p parts from limit_pushdown + +explain +select ctinyint,cdouble,csmallint from alltypesorc where ctinyint is not null order by ctinyint,cdouble limit 20 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: alltypesorc + Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: ctinyint is not null (type: boolean) + Statistics: Num rows: 6144 Data size: 188618 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: ctinyint (type: tinyint), cdouble (type: double), csmallint (type: smallint) + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 6144 Data size: 188618 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: tinyint), _col1 (type: double) + sort order: ++ + Statistics: Num rows: 6144 Data size: 188618 Basic stats: COMPLETE Column stats: NONE + TopN Hash Memory Usage: 0.3 + value expressions: _col2 (type: smallint) + Execution mode: vectorized + Reducer 2 + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: tinyint), KEY.reducesinkkey1 (type: double), VALUE._col0 (type: smallint) + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 6144 
Data size: 188618 Basic stats: COMPLETE Column stats: NONE + Limit + Number of rows: 20 + Statistics: Num rows: 20 Data size: 600 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 20 Data size: 600 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: 20 + Processor Tree: + ListSink + +PREHOOK: query: select ctinyint,cdouble,csmallint from alltypesorc where ctinyint is not null order by ctinyint,cdouble limit 20 +PREHOOK: type: QUERY +PREHOOK: Input: default@alltypesorc +#### A masked pattern was here #### +POSTHOOK: query: select ctinyint,cdouble,csmallint from alltypesorc where ctinyint is not null order by ctinyint,cdouble limit 20 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@alltypesorc +#### A masked pattern was here #### +-64 -15920.0 -15920 +-64 -10462.0 -10462 +-64 -9842.0 -9842 +-64 -8080.0 -8080 +-64 -7196.0 -7196 +-64 -7196.0 -7196 +-64 -7196.0 -7196 +-64 -7196.0 -7196 +-64 -7196.0 -7196 +-64 -7196.0 -7196 +-64 -7196.0 -7196 +-64 -6907.0 -6907 +-64 -4803.0 -4803 +-64 -4040.0 -4040 +-64 -4018.0 -4018 +-64 -3586.0 -3586 +-64 -3097.0 -3097 +-64 -2919.0 -2919 +-64 -1600.0 -1600 +-64 -200.0 -200 +PREHOOK: query: -- deduped RS +explain +select ctinyint,avg(cdouble + 1) from alltypesorc group by ctinyint order by ctinyint limit 20 +PREHOOK: type: QUERY +POSTHOOK: query: -- deduped RS +explain +select ctinyint,avg(cdouble + 1) from alltypesorc group by ctinyint order by ctinyint limit 20 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: alltypesorc + Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: ctinyint (type: tinyint), cdouble (type: double) + outputColumnNames: ctinyint, cdouble + Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: avg((cdouble + 1)) + keys: ctinyint (type: tinyint) + mode: hash + outputColumnNames: _col0, _col1 + Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: tinyint) + sort order: + + Map-reduce partition columns: _col0 (type: tinyint) + Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE + TopN Hash Memory Usage: 0.3 + value expressions: _col1 (type: struct) + Execution mode: vectorized + Reducer 2 + Reduce Operator Tree: + Group By Operator + aggregations: avg(VALUE._col0) + keys: KEY._col0 (type: tinyint) + mode: mergepartial + outputColumnNames: _col0, _col1 + Statistics: Num rows: 6144 Data size: 188618 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: tinyint), _col1 (type: double) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 6144 Data size: 188618 Basic stats: COMPLETE Column stats: NONE + Limit + Number of rows: 20 + Statistics: Num rows: 20 Data size: 600 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 20 Data size: 600 
Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: 20 + Processor Tree: + ListSink + +PREHOOK: query: select ctinyint,avg(cdouble + 1) from alltypesorc group by ctinyint order by ctinyint limit 20 +PREHOOK: type: QUERY +PREHOOK: Input: default@alltypesorc +#### A masked pattern was here #### +POSTHOOK: query: select ctinyint,avg(cdouble + 1) from alltypesorc group by ctinyint order by ctinyint limit 20 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@alltypesorc +#### A masked pattern was here #### +NULL 9370.0945309795 +-64 373.52941176470586 +-63 2178.7272727272725 +-62 245.69387755102042 +-61 914.3404255319149 +-60 1071.82 +-59 318.27272727272725 +-58 3483.2444444444445 +-57 1867.0535714285713 +-56 2595.818181818182 +-55 2385.595744680851 +-54 2712.7272727272725 +-53 -532.7567567567568 +-52 2810.705882352941 +-51 -96.46341463414635 +-50 -960.0192307692307 +-49 768.7659574468086 +-48 1672.909090909091 +-47 -574.6428571428571 +-46 3033.55 +PREHOOK: query: -- distincts +explain +select distinct(ctinyint) from alltypesorc limit 20 +PREHOOK: type: QUERY +POSTHOOK: query: -- distincts +explain +select distinct(ctinyint) from alltypesorc limit 20 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: alltypesorc + Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: ctinyint (type: tinyint) + outputColumnNames: ctinyint + Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE + Group By Operator + keys: ctinyint (type: tinyint) + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: tinyint) + sort order: + + Map-reduce partition columns: _col0 (type: tinyint) + Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE + TopN Hash Memory Usage: 0.3 + Execution mode: vectorized + Reducer 2 + Reduce Operator Tree: + Group By Operator + keys: KEY._col0 (type: tinyint) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 6144 Data size: 188618 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: tinyint) + outputColumnNames: _col0 + Statistics: Num rows: 6144 Data size: 188618 Basic stats: COMPLETE Column stats: NONE + Limit + Number of rows: 20 + Statistics: Num rows: 20 Data size: 600 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 20 Data size: 600 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: 20 + Processor Tree: + ListSink + +PREHOOK: query: select distinct(ctinyint) from alltypesorc limit 20 +PREHOOK: type: QUERY +PREHOOK: Input: default@alltypesorc +#### A masked pattern was here #### +POSTHOOK: 
query: select distinct(ctinyint) from alltypesorc limit 20 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@alltypesorc +#### A masked pattern was here #### +NULL +-64 +-63 +-62 +-61 +-60 +-59 +-58 +-57 +-56 +-55 +-54 +-53 +-52 +-51 +-50 +-49 +-48 +-47 +-46 +PREHOOK: query: explain +select ctinyint, count(distinct(cdouble)) from alltypesorc group by ctinyint limit 20 +PREHOOK: type: QUERY +POSTHOOK: query: explain +select ctinyint, count(distinct(cdouble)) from alltypesorc group by ctinyint limit 20 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: alltypesorc + Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: ctinyint (type: tinyint), cdouble (type: double) + outputColumnNames: ctinyint, cdouble + Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count(DISTINCT cdouble) + keys: ctinyint (type: tinyint), cdouble (type: double) + mode: hash + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: tinyint), _col1 (type: double) + sort order: ++ + Map-reduce partition columns: _col0 (type: tinyint) + Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE + TopN Hash Memory Usage: 0.3 + Execution mode: vectorized + Reducer 2 + Reduce Operator Tree: + Group By Operator + aggregations: count(DISTINCT KEY._col1:0._col0) + keys: KEY._col0 (type: tinyint) + mode: mergepartial + outputColumnNames: _col0, _col1 + Statistics: Num rows: 6144 Data size: 188618 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: tinyint), _col1 (type: bigint) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 6144 Data size: 188618 Basic stats: COMPLETE Column stats: NONE + Limit + Number of rows: 20 + Statistics: Num rows: 20 Data size: 600 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 20 Data size: 600 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: 20 + Processor Tree: + ListSink + +PREHOOK: query: select ctinyint, count(distinct(cdouble)) from alltypesorc group by ctinyint limit 20 +PREHOOK: type: QUERY +PREHOOK: Input: default@alltypesorc +#### A masked pattern was here #### +POSTHOOK: query: select ctinyint, count(distinct(cdouble)) from alltypesorc group by ctinyint limit 20 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@alltypesorc +#### A masked pattern was here #### +NULL 2932 +-64 24 +-63 19 +-62 27 +-61 25 +-60 27 +-59 31 +-58 23 +-57 35 +-56 36 +-55 29 +-54 26 +-53 22 +-52 33 +-51 21 +-50 30 +-49 26 +-48 29 +-47 22 +-46 24 +PREHOOK: query: -- limit zero +explain +select ctinyint,cdouble from alltypesorc order by ctinyint limit 0 +PREHOOK: type: QUERY +POSTHOOK: query: -- limit zero +explain +select ctinyint,cdouble from alltypesorc order by ctinyint limit 0 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE 
PLANS: + Stage: Stage-0 + Fetch Operator + limit: 0 + Processor Tree: + ListSink + +PREHOOK: query: select ctinyint,cdouble from alltypesorc order by ctinyint limit 0 +PREHOOK: type: QUERY +PREHOOK: Input: default@alltypesorc +#### A masked pattern was here #### +POSTHOOK: query: select ctinyint,cdouble from alltypesorc order by ctinyint limit 0 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@alltypesorc +#### A masked pattern was here #### +PREHOOK: query: -- 2MR (applied to last RS) +explain +select cdouble, sum(ctinyint) as sum from alltypesorc where ctinyint is not null group by cdouble order by sum, cdouble limit 20 +PREHOOK: type: QUERY +POSTHOOK: query: -- 2MR (applied to last RS) +explain +select cdouble, sum(ctinyint) as sum from alltypesorc where ctinyint is not null group by cdouble order by sum, cdouble limit 20 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE) + Reducer 3 <- Reducer 2 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: alltypesorc + Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: ctinyint is not null (type: boolean) + Statistics: Num rows: 6144 Data size: 188618 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: cdouble (type: double), ctinyint (type: tinyint) + outputColumnNames: cdouble, ctinyint + Statistics: Num rows: 6144 Data size: 188618 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: sum(ctinyint) + keys: cdouble (type: double) + mode: hash + outputColumnNames: _col0, _col1 + Statistics: Num rows: 6144 Data size: 188618 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: double) + sort order: + + Map-reduce partition columns: _col0 (type: double) + Statistics: Num rows: 6144 Data size: 188618 Basic stats: COMPLETE Column stats: NONE + value expressions: _col1 (type: bigint) + Execution mode: vectorized + Reducer 2 + Reduce Operator Tree: + Group By Operator + aggregations: sum(VALUE._col0) + keys: KEY._col0 (type: double) + mode: mergepartial + outputColumnNames: _col0, _col1 + Statistics: Num rows: 3072 Data size: 94309 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: double), _col1 (type: bigint) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 3072 Data size: 94309 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col1 (type: bigint), _col0 (type: double) + sort order: ++ + Statistics: Num rows: 3072 Data size: 94309 Basic stats: COMPLETE Column stats: NONE + TopN Hash Memory Usage: 0.3 + Execution mode: vectorized + Reducer 3 + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey1 (type: double), KEY.reducesinkkey0 (type: bigint) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 3072 Data size: 94309 Basic stats: COMPLETE Column stats: NONE + Limit + Number of rows: 20 + Statistics: Num rows: 20 Data size: 600 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 20 Data size: 600 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + 
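
The "TopN Hash Memory Usage: 0.3" entries in the reduce-sink operators above are the HIVE-3562 limit pushdown: each upstream task retains only the best LIMIT sort keys in a bounded structure instead of shipping every row to the reducer. (Assumption: the 0.3 reflects the fraction set via hive.limit.pushdown.memory.usage in this test.) The retained-set idea, as a minimal heap sketch rather than the operator's actual hash structure:

    import java.util.PriorityQueue;

    public class TopNSketch {
        public static void main(String[] args) {
            int limit = 20;
            // Max-heap of sort keys: evicting the largest keeps the 20 smallest,
            // which is all an ORDER BY ... LIMIT 20 reducer ever needs to see.
            PriorityQueue<Double> heap = new PriorityQueue<>((a, b) -> Double.compare(b, a));
            double[] cdoubles = {1839.0, -10011.0, -13877.0, 10361.0, -8881.0, -2281.0, 9531.0};
            for (double v : cdoubles) {
                heap.offer(v);
                if (heap.size() > limit) heap.poll();
            }
            System.out.println(heap.size() + " rows forwarded instead of the full input");
        }
    }
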
Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: 20 + Processor Tree: + ListSink + +PREHOOK: query: select cdouble, sum(ctinyint) as sum from alltypesorc where ctinyint is not null group by cdouble order by sum, cdouble limit 20 +PREHOOK: type: QUERY +PREHOOK: Input: default@alltypesorc +#### A masked pattern was here #### +POSTHOOK: query: select cdouble, sum(ctinyint) as sum from alltypesorc where ctinyint is not null group by cdouble order by sum, cdouble limit 20 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@alltypesorc +#### A masked pattern was here #### +NULL -32768 +-7196.0 -2009 +15601.0 -1733 +4811.0 -115 +-11322.0 -101 +-1121.0 -89 +7705.0 -88 +3520.0 -86 +-8118.0 -80 +5241.0 -80 +-11492.0 -78 +9452.0 -76 +557.0 -75 +10496.0 -67 +-15920.0 -64 +-10462.0 -64 +-9842.0 -64 +-8080.0 -64 +-6907.0 -64 +-4803.0 -64 Index: ql/src/test/results/clientpositive/tez/vectorized_casts.q.out =================================================================== --- ql/src/test/results/clientpositive/tez/vectorized_casts.q.out (revision 0) +++ ql/src/test/results/clientpositive/tez/vectorized_casts.q.out (working copy) @@ -0,0 +1,337 @@ +PREHOOK: query: -- Test type casting in vectorized mode to verify end-to-end functionality. + +explain +select +-- to boolean + cast (ctinyint as boolean) + ,cast (csmallint as boolean) + ,cast (cint as boolean) + ,cast (cbigint as boolean) + ,cast (cfloat as boolean) + ,cast (cdouble as boolean) + ,cast (cboolean1 as boolean) + ,cast (cbigint * 0 as boolean) + ,cast (ctimestamp1 as boolean) + ,cast (cstring1 as boolean) +-- to int family + ,cast (ctinyint as int) + ,cast (csmallint as int) + ,cast (cint as int) + ,cast (cbigint as int) + ,cast (cfloat as int) + ,cast (cdouble as int) + ,cast (cboolean1 as int) + ,cast (ctimestamp1 as int) + ,cast (cstring1 as int) + ,cast (substr(cstring1, 1, 1) as int) + ,cast (cfloat as tinyint) + ,cast (cfloat as smallint) + ,cast (cfloat as bigint) +-- to float family + ,cast (ctinyint as double) + ,cast (csmallint as double) + ,cast (cint as double) + ,cast (cbigint as double) + ,cast (cfloat as double) + ,cast (cdouble as double) + ,cast (cboolean1 as double) + ,cast (ctimestamp1 as double) + ,cast (cstring1 as double) + ,cast (substr(cstring1, 1, 1) as double) + ,cast (cint as float) + ,cast (cdouble as float) +-- to timestamp + ,cast (ctinyint as timestamp) + ,cast (csmallint as timestamp) + ,cast (cint as timestamp) + ,cast (cbigint as timestamp) + ,cast (cfloat as timestamp) + ,cast (cdouble as timestamp) + ,cast (cboolean1 as timestamp) + ,cast (cbigint * 0 as timestamp) + ,cast (ctimestamp1 as timestamp) + ,cast (cstring1 as timestamp) + ,cast (substr(cstring1, 1, 1) as timestamp) +-- to string + ,cast (ctinyint as string) + ,cast (csmallint as string) + ,cast (cint as string) + ,cast (cbigint as string) + ,cast (cfloat as string) + ,cast (cdouble as string) + ,cast (cboolean1 as string) + ,cast (cbigint * 0 as string) + ,cast (ctimestamp1 as string) + ,cast (cstring1 as string) +-- nested and expression arguments + ,cast (cast (cfloat as int) as float) + ,cast (cint * 2 as double) + ,cast (sin(cfloat) as string) + ,cast (cint as float) + cast(cboolean1 as double) +from alltypesorc +-- limit output to a reasonably small number of rows +where cbigint % 250 = 0 +PREHOOK: type: QUERY +POSTHOOK: query: -- Test type casting in vectorized mode to verify end-to-end functionality. 
+ +explain +select +-- to boolean + cast (ctinyint as boolean) + ,cast (csmallint as boolean) + ,cast (cint as boolean) + ,cast (cbigint as boolean) + ,cast (cfloat as boolean) + ,cast (cdouble as boolean) + ,cast (cboolean1 as boolean) + ,cast (cbigint * 0 as boolean) + ,cast (ctimestamp1 as boolean) + ,cast (cstring1 as boolean) +-- to int family + ,cast (ctinyint as int) + ,cast (csmallint as int) + ,cast (cint as int) + ,cast (cbigint as int) + ,cast (cfloat as int) + ,cast (cdouble as int) + ,cast (cboolean1 as int) + ,cast (ctimestamp1 as int) + ,cast (cstring1 as int) + ,cast (substr(cstring1, 1, 1) as int) + ,cast (cfloat as tinyint) + ,cast (cfloat as smallint) + ,cast (cfloat as bigint) +-- to float family + ,cast (ctinyint as double) + ,cast (csmallint as double) + ,cast (cint as double) + ,cast (cbigint as double) + ,cast (cfloat as double) + ,cast (cdouble as double) + ,cast (cboolean1 as double) + ,cast (ctimestamp1 as double) + ,cast (cstring1 as double) + ,cast (substr(cstring1, 1, 1) as double) + ,cast (cint as float) + ,cast (cdouble as float) +-- to timestamp + ,cast (ctinyint as timestamp) + ,cast (csmallint as timestamp) + ,cast (cint as timestamp) + ,cast (cbigint as timestamp) + ,cast (cfloat as timestamp) + ,cast (cdouble as timestamp) + ,cast (cboolean1 as timestamp) + ,cast (cbigint * 0 as timestamp) + ,cast (ctimestamp1 as timestamp) + ,cast (cstring1 as timestamp) + ,cast (substr(cstring1, 1, 1) as timestamp) +-- to string + ,cast (ctinyint as string) + ,cast (csmallint as string) + ,cast (cint as string) + ,cast (cbigint as string) + ,cast (cfloat as string) + ,cast (cdouble as string) + ,cast (cboolean1 as string) + ,cast (cbigint * 0 as string) + ,cast (ctimestamp1 as string) + ,cast (cstring1 as string) +-- nested and expression arguments + ,cast (cast (cfloat as int) as float) + ,cast (cint * 2 as double) + ,cast (sin(cfloat) as string) + ,cast (cint as float) + cast(cboolean1 as double) +from alltypesorc +-- limit output to a reasonably small number of rows +where cbigint % 250 = 0 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + TableScan + alias: alltypesorc + Filter Operator + predicate: ((cbigint % 250) = 0) (type: boolean) + Select Operator + expressions: UDFToBoolean(ctinyint) (type: boolean), UDFToBoolean(csmallint) (type: boolean), UDFToBoolean(cint) (type: boolean), UDFToBoolean(cbigint) (type: boolean), UDFToBoolean(cfloat) (type: boolean), UDFToBoolean(cdouble) (type: boolean), cboolean1 (type: boolean), UDFToBoolean((cbigint * 0)) (type: boolean), UDFToBoolean(ctimestamp1) (type: boolean), UDFToBoolean(cstring1) (type: boolean), UDFToInteger(ctinyint) (type: int), UDFToInteger(csmallint) (type: int), cint (type: int), UDFToInteger(cbigint) (type: int), UDFToInteger(cfloat) (type: int), UDFToInteger(cdouble) (type: int), UDFToInteger(cboolean1) (type: int), UDFToInteger(ctimestamp1) (type: int), UDFToInteger(cstring1) (type: int), UDFToInteger(substr(cstring1, 1, 1)) (type: int), UDFToByte(cfloat) (type: tinyint), UDFToShort(cfloat) (type: smallint), UDFToLong(cfloat) (type: bigint), UDFToDouble(ctinyint) (type: double), UDFToDouble(csmallint) (type: double), UDFToDouble(cint) (type: double), UDFToDouble(cbigint) (type: double), UDFToDouble(cfloat) (type: double), cdouble (type: double), UDFToDouble(cboolean1) (type: double), UDFToDouble(ctimestamp1) (type: double), UDFToDouble(cstring1) (type: double), UDFToDouble(substr(cstring1, 1, 1)) 
(type: double), UDFToFloat(cint) (type: float), UDFToFloat(cdouble) (type: float), CAST( ctinyint AS TIMESTAMP) (type: timestamp), CAST( csmallint AS TIMESTAMP) (type: timestamp), CAST( cint AS TIMESTAMP) (type: timestamp), CAST( cbigint AS TIMESTAMP) (type: timestamp), CAST( cfloat AS TIMESTAMP) (type: timestamp), CAST( cdouble AS TIMESTAMP) (type: timestamp), CAST( cboolean1 AS TIMESTAMP) (type: timestamp), CAST( (cbigint * 0) AS TIMESTAMP) (type: timestamp), ctimestamp1 (type: timestamp), CAST( cstring1 AS TIMESTAMP) (type: timestamp), CAST( substr(cstring1, 1, 1) AS TIMESTAMP) (type: timestamp), UDFToString(ctinyint) (type: string), UDFToString(csmallint) (type: string), UDFToString(cint) (type: string), UDFToString(cbigint) (type: string), UDFToString(cfloat) (type: string), UDFToString(cdouble) (type: string), UDFToString(cboolean1) (type: string), UDFToString((cbigint * 0)) (type: string), UDFToString(ctimestamp1) (type: string), cstring1 (type: string), UDFToFloat(UDFToInteger(cfloat)) (type: float), UDFToDouble((cint * 2)) (type: double), UDFToString(sin(cfloat)) (type: string), (UDFToFloat(cint) + UDFToDouble(cboolean1)) (type: double) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32, _col33, _col34, _col35, _col36, _col37, _col38, _col39, _col40, _col41, _col42, _col43, _col44, _col45, _col46, _col47, _col48, _col49, _col50, _col51, _col52, _col53, _col54, _col55, _col56, _col57, _col58, _col59 + ListSink + +PREHOOK: query: select +-- to boolean + cast (ctinyint as boolean) + ,cast (csmallint as boolean) + ,cast (cint as boolean) + ,cast (cbigint as boolean) + ,cast (cfloat as boolean) + ,cast (cdouble as boolean) + ,cast (cboolean1 as boolean) + ,cast (cbigint * 0 as boolean) + ,cast (ctimestamp1 as boolean) + ,cast (cstring1 as boolean) +-- to int family + ,cast (ctinyint as int) + ,cast (csmallint as int) + ,cast (cint as int) + ,cast (cbigint as int) + ,cast (cfloat as int) + ,cast (cdouble as int) + ,cast (cboolean1 as int) + ,cast (ctimestamp1 as int) + ,cast (cstring1 as int) + ,cast (substr(cstring1, 1, 1) as int) + ,cast (cfloat as tinyint) + ,cast (cfloat as smallint) + ,cast (cfloat as bigint) +-- to float family + ,cast (ctinyint as double) + ,cast (csmallint as double) + ,cast (cint as double) + ,cast (cbigint as double) + ,cast (cfloat as double) + ,cast (cdouble as double) + ,cast (cboolean1 as double) + ,cast (ctimestamp1 as double) + ,cast (cstring1 as double) + ,cast (substr(cstring1, 1, 1) as double) + ,cast (cint as float) + ,cast (cdouble as float) +-- to timestamp + ,cast (ctinyint as timestamp) + ,cast (csmallint as timestamp) + ,cast (cint as timestamp) + ,cast (cbigint as timestamp) + ,cast (cfloat as timestamp) + ,cast (cdouble as timestamp) + ,cast (cboolean1 as timestamp) + ,cast (cbigint * 0 as timestamp) + ,cast (ctimestamp1 as timestamp) + ,cast (cstring1 as timestamp) + ,cast (substr(cstring1, 1, 1) as timestamp) +-- to string + ,cast (ctinyint as string) + ,cast (csmallint as string) + ,cast (cint as string) + ,cast (cbigint as string) + ,cast (cfloat as string) + ,cast (cdouble as string) + ,cast (cboolean1 as string) + ,cast (cbigint * 0 as string) + ,cast (ctimestamp1 as string) + ,cast (cstring1 as string) +-- nested and expression arguments + ,cast (cast (cfloat as int) as float) + ,cast (cint * 2 as double) + ,cast 
(sin(cfloat) as string) + ,cast (cint as float) + cast(cboolean1 as double) +from alltypesorc +-- limit output to a reasonably small number of rows +where cbigint % 250 = 0 +PREHOOK: type: QUERY +PREHOOK: Input: default@alltypesorc +#### A masked pattern was here #### +POSTHOOK: query: select +-- to boolean + cast (ctinyint as boolean) + ,cast (csmallint as boolean) + ,cast (cint as boolean) + ,cast (cbigint as boolean) + ,cast (cfloat as boolean) + ,cast (cdouble as boolean) + ,cast (cboolean1 as boolean) + ,cast (cbigint * 0 as boolean) + ,cast (ctimestamp1 as boolean) + ,cast (cstring1 as boolean) +-- to int family + ,cast (ctinyint as int) + ,cast (csmallint as int) + ,cast (cint as int) + ,cast (cbigint as int) + ,cast (cfloat as int) + ,cast (cdouble as int) + ,cast (cboolean1 as int) + ,cast (ctimestamp1 as int) + ,cast (cstring1 as int) + ,cast (substr(cstring1, 1, 1) as int) + ,cast (cfloat as tinyint) + ,cast (cfloat as smallint) + ,cast (cfloat as bigint) +-- to float family + ,cast (ctinyint as double) + ,cast (csmallint as double) + ,cast (cint as double) + ,cast (cbigint as double) + ,cast (cfloat as double) + ,cast (cdouble as double) + ,cast (cboolean1 as double) + ,cast (ctimestamp1 as double) + ,cast (cstring1 as double) + ,cast (substr(cstring1, 1, 1) as double) + ,cast (cint as float) + ,cast (cdouble as float) +-- to timestamp + ,cast (ctinyint as timestamp) + ,cast (csmallint as timestamp) + ,cast (cint as timestamp) + ,cast (cbigint as timestamp) + ,cast (cfloat as timestamp) + ,cast (cdouble as timestamp) + ,cast (cboolean1 as timestamp) + ,cast (cbigint * 0 as timestamp) + ,cast (ctimestamp1 as timestamp) + ,cast (cstring1 as timestamp) + ,cast (substr(cstring1, 1, 1) as timestamp) +-- to string + ,cast (ctinyint as string) + ,cast (csmallint as string) + ,cast (cint as string) + ,cast (cbigint as string) + ,cast (cfloat as string) + ,cast (cdouble as string) + ,cast (cboolean1 as string) + ,cast (cbigint * 0 as string) + ,cast (ctimestamp1 as string) + ,cast (cstring1 as string) +-- nested and expression arguments + ,cast (cast (cfloat as int) as float) + ,cast (cint * 2 as double) + ,cast (sin(cfloat) as string) + ,cast (cint as float) + cast(cboolean1 as double) +from alltypesorc +-- limit output to a reasonably small number of rows +where cbigint % 250 = 0 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@alltypesorc +#### A masked pattern was here #### +true true NULL true true true NULL false true NULL -36 -200 NULL -2006216750 -36 -200 NULL -15 NULL NULL -36 -36 -36 -36.0 -200.0 NULL -2.00621675E9 -36.0 -200.0 NULL -14.252 NULL NULL NULL -200.0 1969-12-31 15:59:59.964 1969-12-31 15:59:59.8 NULL 1969-12-08 10:43:03.25 1969-12-31 15:59:24 1969-12-31 15:56:40 NULL 1969-12-31 16:00:00 1969-12-31 15:59:45.748 NULL NULL -36 -200 NULL -2006216750 -36.0 -200.0 NULL 0 1969-12-31 15:59:45.748 NULL -36.0 NULL 0.9917788534431158 NULL +true true NULL true true true NULL false true NULL -36 -200 NULL 1599879000 -36 -200 NULL -7 NULL NULL -36 -36 -36 -36.0 -200.0 NULL 1.599879E9 -36.0 -200.0 NULL -6.183 NULL NULL NULL -200.0 1969-12-31 15:59:59.964 1969-12-31 15:59:59.8 NULL 1970-01-19 04:24:39 1969-12-31 15:59:24 1969-12-31 15:56:40 NULL 1969-12-31 16:00:00 1969-12-31 15:59:53.817 NULL NULL -36 -200 NULL 1599879000 -36.0 -200.0 NULL 0 1969-12-31 15:59:53.817 NULL -36.0 NULL 0.9917788534431158 NULL +true true NULL true true true NULL false true NULL -30 -200 NULL 1429852250 -30 -200 NULL 12 NULL NULL -30 -30 -30 -30.0 -200.0 NULL 1.42985225E9 -30.0 -200.0 NULL 12.935 NULL 
NULL NULL -200.0 1969-12-31 15:59:59.97 1969-12-31 15:59:59.8 NULL 1970-01-17 05:10:52.25 1969-12-31 15:59:30 1969-12-31 15:56:40 NULL 1969-12-31 16:00:00 1969-12-31 16:00:12.935 NULL NULL -30 -200 NULL 1429852250 -30.0 -200.0 NULL 0 1969-12-31 16:00:12.935 NULL -30.0 NULL 0.9880316240928618 NULL +true NULL true true true NULL false false true true -51 NULL 773600971 1053923250 -51 NULL 0 8 NULL 2 -51 -51 -51 -51.0 NULL 7.73600971E8 1.05392325E9 -51.0 NULL 0.0 8.451 NULL 2.0 7.7360096E8 NULL 1969-12-31 15:59:59.949 NULL 1970-01-09 14:53:20.971 1970-01-12 20:45:23.25 1969-12-31 15:59:09 NULL 1969-12-31 16:00:00 1969-12-31 16:00:00 1969-12-31 16:00:08.451 NULL NULL -51 NULL 773600971 1053923250 -51.0 NULL FALSE 0 1969-12-31 16:00:08.451 2yK4Bx76O -51.0 1.547201942E9 -0.6702291758433747 7.7360096E8 +true NULL true true true NULL true false true true -51 NULL 747553882 -1930467250 -51 NULL 1 8 NULL NULL -51 -51 -51 -51.0 NULL 7.47553882E8 -1.93046725E9 -51.0 NULL 1.0 8.451 NULL NULL 7.4755386E8 NULL 1969-12-31 15:59:59.949 NULL 1970-01-09 07:39:13.882 1969-12-09 07:45:32.75 1969-12-31 15:59:09 NULL 1969-12-31 16:00:00.001 1969-12-31 16:00:00 1969-12-31 16:00:08.451 NULL NULL -51 NULL 747553882 -1930467250 -51.0 NULL TRUE 0 1969-12-31 16:00:08.451 q8M86Fx0r -51.0 1.495107764E9 -0.6702291758433747 7.47553857E8 +true true NULL true true true NULL false true NULL 20 15601 NULL -362433250 20 15601 NULL -15 NULL NULL 20 20 20 20.0 15601.0 NULL -3.6243325E8 20.0 15601.0 NULL -14.871 NULL NULL NULL 15601.0 1969-12-31 16:00:00.02 1969-12-31 16:00:15.601 NULL 1969-12-27 11:19:26.75 1969-12-31 16:00:20 1969-12-31 20:20:01 NULL 1969-12-31 16:00:00 1969-12-31 15:59:45.129 NULL NULL 20 15601 NULL -362433250 20.0 15601.0 NULL 0 1969-12-31 15:59:45.129 NULL 20.0 NULL 0.9129452507276277 NULL +true true NULL true true true NULL false true NULL -38 15601 NULL -1858689000 -38 15601 NULL -2 NULL NULL -38 -38 -38 -38.0 15601.0 NULL -1.858689E9 -38.0 15601.0 NULL -1.3860000000000001 NULL NULL NULL 15601.0 1969-12-31 15:59:59.962 1969-12-31 16:00:15.601 NULL 1969-12-10 03:41:51 1969-12-31 15:59:22 1969-12-31 20:20:01 NULL 1969-12-31 16:00:00 1969-12-31 15:59:58.614 NULL NULL -38 15601 NULL -1858689000 -38.0 15601.0 NULL 0 1969-12-31 15:59:58.614 NULL -38.0 NULL -0.2963685787093853 NULL +true true NULL true true true NULL false true NULL -5 15601 NULL 612416000 -5 15601 NULL 4 NULL NULL -5 -5 -5 -5.0 15601.0 NULL 6.12416E8 -5.0 15601.0 NULL 4.679 NULL NULL NULL 15601.0 1969-12-31 15:59:59.995 1969-12-31 16:00:15.601 NULL 1970-01-07 18:06:56 1969-12-31 15:59:55 1969-12-31 20:20:01 NULL 1969-12-31 16:00:00 1969-12-31 16:00:04.679 NULL NULL -5 15601 NULL 612416000 -5.0 15601.0 NULL 0 1969-12-31 16:00:04.679 NULL -5.0 NULL 0.9589242746631385 NULL +true true NULL true true true NULL false true NULL 48 15601 NULL -795361000 48 15601 NULL -10 NULL NULL 48 48 48 48.0 15601.0 NULL -7.95361E8 48.0 15601.0 NULL -9.765 NULL NULL NULL 15601.0 1969-12-31 16:00:00.048 1969-12-31 16:00:15.601 NULL 1969-12-22 11:03:59 1969-12-31 16:00:48 1969-12-31 20:20:01 NULL 1969-12-31 16:00:00 1969-12-31 15:59:50.235 NULL NULL 48 15601 NULL -795361000 48.0 15601.0 NULL 0 1969-12-31 15:59:50.235 NULL 48.0 NULL -0.7682546613236668 NULL +true NULL true true true NULL false false true true 8 NULL -661621138 -931392750 8 NULL 0 15 NULL NULL 8 8 8 8.0 NULL -6.61621138E8 -9.3139275E8 8.0 NULL 0.0 15.892 NULL NULL -6.6162112E8 NULL 1969-12-31 16:00:00.008 NULL 1969-12-24 00:12:58.862 1969-12-20 21:16:47.25 1969-12-31 16:00:08 NULL 1969-12-31 16:00:00 
1969-12-31 16:00:00 1969-12-31 16:00:15.892 NULL NULL 8 NULL -661621138 -931392750 8.0 NULL FALSE 0 1969-12-31 16:00:15.892 L15l8i5k558tBcDV20 8.0 -1.323242276E9 0.9893582466233818 -6.6162112E8 +true NULL true true true NULL false false true true 8 NULL -102936434 -1312782750 8 NULL 0 15 NULL NULL 8 8 8 8.0 NULL -1.02936434E8 -1.31278275E9 8.0 NULL 0.0 15.892 NULL NULL -1.02936432E8 NULL 1969-12-31 16:00:00.008 NULL 1969-12-30 11:24:23.566 1969-12-16 11:20:17.25 1969-12-31 16:00:08 NULL 1969-12-31 16:00:00 1969-12-31 16:00:00 1969-12-31 16:00:15.892 NULL NULL 8 NULL -102936434 -1312782750 8.0 NULL FALSE 0 1969-12-31 16:00:15.892 eJROSNhugc3kQR7Pb 8.0 -2.05872868E8 0.9893582466233818 -1.02936432E8 +true NULL true true true NULL false false true true 8 NULL 805179664 868161500 8 NULL 0 15 NULL NULL 8 8 8 8.0 NULL 8.05179664E8 8.681615E8 8.0 NULL 0.0 15.892 NULL NULL 8.0517965E8 NULL 1969-12-31 16:00:00.008 NULL 1970-01-09 23:39:39.664 1970-01-10 17:09:21.5 1969-12-31 16:00:08 NULL 1969-12-31 16:00:00 1969-12-31 16:00:00 1969-12-31 16:00:15.892 NULL NULL 8 NULL 805179664 868161500 8.0 NULL FALSE 0 1969-12-31 16:00:15.892 e005B5q 8.0 1.610359328E9 0.9893582466233818 8.05179648E8 +true NULL true true true NULL false false true true 8 NULL -669632311 1588591250 8 NULL 0 15 NULL 3 8 8 8 8.0 NULL -6.69632311E8 1.58859125E9 8.0 NULL 0.0 15.892 NULL 3.0 -6.6963232E8 NULL 1969-12-31 16:00:00.008 NULL 1969-12-23 21:59:27.689 1970-01-19 01:16:31.25 1969-12-31 16:00:08 NULL 1969-12-31 16:00:00 1969-12-31 16:00:00 1969-12-31 16:00:15.892 NULL NULL 8 NULL -669632311 1588591250 8.0 NULL FALSE 0 1969-12-31 16:00:15.892 3r3sDvfUkG0yTP3LnX5mNQRr 8.0 -1.339264622E9 0.9893582466233818 -6.6963232E8 +true NULL true true true NULL true false true true 8 NULL 890988972 -1862301000 8 NULL 1 15 NULL NULL 8 8 8 8.0 NULL 8.90988972E8 -1.862301E9 8.0 NULL 1.0 15.892 NULL NULL 8.9098899E8 NULL 1969-12-31 16:00:00.008 NULL 1970-01-10 23:29:48.972 1969-12-10 02:41:39 1969-12-31 16:00:08 NULL 1969-12-31 16:00:00.001 1969-12-31 16:00:00 1969-12-31 16:00:15.892 NULL NULL 8 NULL 890988972 -1862301000 8.0 NULL TRUE 0 1969-12-31 16:00:15.892 XylAH4 8.0 1.781977944E9 0.9893582466233818 8.90988993E8 +true NULL true true true NULL true false true true 8 NULL 930867246 1205399250 8 NULL 1 15 NULL NULL 8 8 8 8.0 NULL 9.30867246E8 1.20539925E9 8.0 NULL 1.0 15.892 NULL NULL 9.3086726E8 NULL 1969-12-31 16:00:00.008 NULL 1970-01-11 10:34:27.246 1970-01-14 14:49:59.25 1969-12-31 16:00:08 NULL 1969-12-31 16:00:00.001 1969-12-31 16:00:00 1969-12-31 16:00:15.892 NULL NULL 8 NULL 930867246 1205399250 8.0 NULL TRUE 0 1969-12-31 16:00:15.892 c1V8o1A 8.0 1.861734492E9 0.9893582466233818 9.30867265E8 +true true NULL true true true NULL false true NULL -59 -7196 NULL -1604890000 -59 -7196 NULL 13 NULL NULL -59 -59 -59 -59.0 -7196.0 NULL -1.60489E9 -59.0 -7196.0 NULL 13.15 NULL NULL NULL -7196.0 1969-12-31 15:59:59.941 1969-12-31 15:59:52.804 NULL 1969-12-13 02:11:50 1969-12-31 15:59:01 1969-12-31 14:00:04 NULL 1969-12-31 16:00:00 1969-12-31 16:00:13.15 NULL NULL -59 -7196 NULL -1604890000 -59.0 -7196.0 NULL 0 1969-12-31 16:00:13.15 NULL -59.0 NULL -0.6367380071391379 NULL +true true NULL true true true NULL false true NULL -21 -7196 NULL 1542429000 -21 -7196 NULL -5 NULL NULL -21 -21 -21 -21.0 -7196.0 NULL 1.542429E9 -21.0 -7196.0 NULL -4.1 NULL NULL NULL -7196.0 1969-12-31 15:59:59.979 1969-12-31 15:59:52.804 NULL 1970-01-18 12:27:09 1969-12-31 15:59:39 1969-12-31 14:00:04 NULL 1969-12-31 16:00:00 1969-12-31 15:59:55.9 NULL NULL -21 -7196 NULL 
1542429000 -21.0 -7196.0 NULL 0 1969-12-31 15:59:55.9 NULL -21.0 NULL -0.8366556385360561 NULL +true true NULL true true true NULL false true NULL -60 -7196 NULL 1516314750 -60 -7196 NULL -8 NULL NULL -60 -60 -60 -60.0 -7196.0 NULL 1.51631475E9 -60.0 -7196.0 NULL -7.592 NULL NULL NULL -7196.0 1969-12-31 15:59:59.94 1969-12-31 15:59:52.804 NULL 1970-01-18 05:11:54.75 1969-12-31 15:59:00 1969-12-31 14:00:04 NULL 1969-12-31 16:00:00 1969-12-31 15:59:52.408 NULL NULL -60 -7196 NULL 1516314750 -60.0 -7196.0 NULL 0 1969-12-31 15:59:52.408 NULL -60.0 NULL 0.3048106211022167 NULL +true true NULL true true true NULL false true NULL -14 -7196 NULL -1552199500 -14 -7196 NULL 11 NULL NULL -14 -14 -14 -14.0 -7196.0 NULL -1.5521995E9 -14.0 -7196.0 NULL 11.065 NULL NULL NULL -7196.0 1969-12-31 15:59:59.986 1969-12-31 15:59:52.804 NULL 1969-12-13 16:50:00.5 1969-12-31 15:59:46 1969-12-31 14:00:04 NULL 1969-12-31 16:00:00 1969-12-31 16:00:11.065 NULL NULL -14 -7196 NULL -1552199500 -14.0 -7196.0 NULL 0 1969-12-31 16:00:11.065 NULL -14.0 NULL -0.9906073556948704 NULL +true true NULL true true true NULL false true NULL 59 -7196 NULL -1137754500 59 -7196 NULL 10 NULL NULL 59 59 59 59.0 -7196.0 NULL -1.1377545E9 59.0 -7196.0 NULL 10.956 NULL NULL NULL -7196.0 1969-12-31 16:00:00.059 1969-12-31 15:59:52.804 NULL 1969-12-18 11:57:25.5 1969-12-31 16:00:59 1969-12-31 14:00:04 NULL 1969-12-31 16:00:00 1969-12-31 16:00:10.956 NULL NULL 59 -7196 NULL -1137754500 59.0 -7196.0 NULL 0 1969-12-31 16:00:10.956 NULL 59.0 NULL 0.6367380071391379 NULL +true true NULL true true true NULL false true NULL -8 -7196 NULL -1849991500 -8 -7196 NULL 3 NULL NULL -8 -8 -8 -8.0 -7196.0 NULL -1.8499915E9 -8.0 -7196.0 NULL 3.136 NULL NULL NULL -7196.0 1969-12-31 15:59:59.992 1969-12-31 15:59:52.804 NULL 1969-12-10 06:06:48.5 1969-12-31 15:59:52 1969-12-31 14:00:04 NULL 1969-12-31 16:00:00 1969-12-31 16:00:03.136 NULL NULL -8 -7196 NULL -1849991500 -8.0 -7196.0 NULL 0 1969-12-31 16:00:03.136 NULL -8.0 NULL -0.9893582466233818 NULL +true true NULL true true true NULL false true NULL 5 -7196 NULL -1015607500 5 -7196 NULL 10 NULL NULL 5 5 5 5.0 -7196.0 NULL -1.0156075E9 5.0 -7196.0 NULL 10.973 NULL NULL NULL -7196.0 1969-12-31 16:00:00.005 1969-12-31 15:59:52.804 NULL 1969-12-19 21:53:12.5 1969-12-31 16:00:05 1969-12-31 14:00:04 NULL 1969-12-31 16:00:00 1969-12-31 16:00:10.973 NULL NULL 5 -7196 NULL -1015607500 5.0 -7196.0 NULL 0 1969-12-31 16:00:10.973 NULL 5.0 NULL -0.9589242746631385 NULL +true true NULL true true true NULL false true NULL -24 -7196 NULL 829111000 -24 -7196 NULL -7 NULL NULL -24 -24 -24 -24.0 -7196.0 NULL 8.29111E8 -24.0 -7196.0 NULL -6.855 NULL NULL NULL -7196.0 1969-12-31 15:59:59.976 1969-12-31 15:59:52.804 NULL 1970-01-10 06:18:31 1969-12-31 15:59:36 1969-12-31 14:00:04 NULL 1969-12-31 16:00:00 1969-12-31 15:59:53.145 NULL NULL -24 -7196 NULL 829111000 -24.0 -7196.0 NULL 0 1969-12-31 15:59:53.145 NULL -24.0 NULL 0.9055783620066238 NULL +true true NULL true true true NULL false true NULL -50 -7196 NULL -1031187250 -50 -7196 NULL -6 NULL NULL -50 -50 -50 -50.0 -7196.0 NULL -1.03118725E9 -50.0 -7196.0 NULL -5.267 NULL NULL NULL -7196.0 1969-12-31 15:59:59.95 1969-12-31 15:59:52.804 NULL 1969-12-19 17:33:32.75 1969-12-31 15:59:10 1969-12-31 14:00:04 NULL 1969-12-31 16:00:00 1969-12-31 15:59:54.733 NULL NULL -50 -7196 NULL -1031187250 -50.0 -7196.0 NULL 0 1969-12-31 15:59:54.733 NULL -50.0 NULL 0.26237485370392877 NULL +true NULL true true true NULL true false true true 11 NULL -64615982 1803053750 11 NULL 1 2 NULL 8 11 11 
11 11.0 NULL -6.4615982E7 1.80305375E9 11.0 NULL 1.0 2.351 NULL 8.0 -6.4615984E7 NULL 1969-12-31 16:00:00.011 NULL 1969-12-30 22:03:04.018 1970-01-21 12:50:53.75 1969-12-31 16:00:11 NULL 1969-12-31 16:00:00.001 1969-12-31 16:00:00 1969-12-31 16:00:02.351 NULL NULL 11 NULL -64615982 1803053750 11.0 NULL TRUE 0 1969-12-31 16:00:02.351 8J5OB7K26PEV7kdbeHr3 11.0 -1.29231964E8 -0.9999902065507035 -6.4615983E7 +true NULL true true true NULL true false true true 11 NULL -335450417 1233327000 11 NULL 1 2 NULL NULL 11 11 11 11.0 NULL -3.35450417E8 1.233327E9 11.0 NULL 1.0 2.351 NULL NULL -3.35450432E8 NULL 1969-12-31 16:00:00.011 NULL 1969-12-27 18:49:09.583 1970-01-14 22:35:27 1969-12-31 16:00:11 NULL 1969-12-31 16:00:00.001 1969-12-31 16:00:00 1969-12-31 16:00:02.351 NULL NULL 11 NULL -335450417 1233327000 11.0 NULL TRUE 0 1969-12-31 16:00:02.351 dOYnqgaXoJ1P3ERwxe5N7 11.0 -6.70900834E8 -0.9999902065507035 -3.35450431E8 Index: ql/src/test/results/clientpositive/tez/vectorized_date_funcs.q.out =================================================================== --- ql/src/test/results/clientpositive/tez/vectorized_date_funcs.q.out (revision 0) +++ ql/src/test/results/clientpositive/tez/vectorized_date_funcs.q.out (working copy) @@ -0,0 +1,1019 @@ +PREHOOK: query: -- Test timestamp functions in vectorized mode to verify they run correctly end-to-end. + +CREATE TABLE date_udf_flight ( + origin_city_name STRING, + dest_city_name STRING, + fl_date DATE, + arr_delay FLOAT, + fl_num INT +) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@date_udf_flight +POSTHOOK: query: -- Test timestamp functions in vectorized mode to verify they run correctly end-to-end. + +CREATE TABLE date_udf_flight ( + origin_city_name STRING, + dest_city_name STRING, + fl_date DATE, + arr_delay FLOAT, + fl_num INT +) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@date_udf_flight +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/flights_tiny.txt.1' OVERWRITE INTO TABLE date_udf_flight +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@date_udf_flight +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/flights_tiny.txt.1' OVERWRITE INTO TABLE date_udf_flight +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@date_udf_flight +PREHOOK: query: CREATE TABLE date_udf_flight_orc ( + fl_date DATE, + fl_time TIMESTAMP +) STORED AS ORC +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@date_udf_flight_orc +POSTHOOK: query: CREATE TABLE date_udf_flight_orc ( + fl_date DATE, + fl_time TIMESTAMP +) STORED AS ORC +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@date_udf_flight_orc +PREHOOK: query: INSERT INTO TABLE date_udf_flight_orc SELECT fl_date, to_utc_timestamp(fl_date, 'America/Los_Angeles') FROM date_udf_flight +PREHOOK: type: QUERY +PREHOOK: Input: default@date_udf_flight +PREHOOK: Output: default@date_udf_flight_orc +POSTHOOK: query: INSERT INTO TABLE date_udf_flight_orc SELECT fl_date, to_utc_timestamp(fl_date, 'America/Los_Angeles') FROM date_udf_flight +POSTHOOK: type: QUERY +POSTHOOK: Input: default@date_udf_flight +POSTHOOK: Output: default@date_udf_flight_orc +POSTHOOK: Lineage: date_udf_flight_orc.fl_date SIMPLE [(date_udf_flight)date_udf_flight.FieldSchema(name:fl_date, type:date, comment:null), ] +POSTHOOK: Lineage: date_udf_flight_orc.fl_time EXPRESSION 
[(date_udf_flight)date_udf_flight.FieldSchema(name:fl_date, type:date, comment:null), ] +PREHOOK: query: SELECT * FROM date_udf_flight_orc +PREHOOK: type: QUERY +PREHOOK: Input: default@date_udf_flight_orc +#### A masked pattern was here #### +POSTHOOK: query: SELECT * FROM date_udf_flight_orc +POSTHOOK: type: QUERY +POSTHOOK: Input: default@date_udf_flight_orc +#### A masked pattern was here #### +2010-10-20 2010-10-20 07:00:00 +2010-10-20 2010-10-20 07:00:00 +2010-10-20 2010-10-20 07:00:00 +2010-10-20 2010-10-20 07:00:00 +2010-10-20 2010-10-20 07:00:00 +2010-10-20 2010-10-20 07:00:00 +2010-10-20 2010-10-20 07:00:00 +2010-10-20 2010-10-20 07:00:00 +2010-10-21 2010-10-21 07:00:00 +2010-10-21 2010-10-21 07:00:00 +2010-10-21 2010-10-21 07:00:00 +2010-10-21 2010-10-21 07:00:00 +2010-10-21 2010-10-21 07:00:00 +2010-10-21 2010-10-21 07:00:00 +2010-10-21 2010-10-21 07:00:00 +2010-10-21 2010-10-21 07:00:00 +2010-10-22 2010-10-22 07:00:00 +2010-10-22 2010-10-22 07:00:00 +2010-10-22 2010-10-22 07:00:00 +2010-10-22 2010-10-22 07:00:00 +2010-10-22 2010-10-22 07:00:00 +2010-10-22 2010-10-22 07:00:00 +2010-10-22 2010-10-22 07:00:00 +2010-10-22 2010-10-22 07:00:00 +2010-10-23 2010-10-23 07:00:00 +2010-10-23 2010-10-23 07:00:00 +2010-10-23 2010-10-23 07:00:00 +2010-10-23 2010-10-23 07:00:00 +2010-10-23 2010-10-23 07:00:00 +2010-10-23 2010-10-23 07:00:00 +2010-10-23 2010-10-23 07:00:00 +2010-10-24 2010-10-24 07:00:00 +2010-10-24 2010-10-24 07:00:00 +2010-10-24 2010-10-24 07:00:00 +2010-10-24 2010-10-24 07:00:00 +2010-10-24 2010-10-24 07:00:00 +2010-10-24 2010-10-24 07:00:00 +2010-10-24 2010-10-24 07:00:00 +2010-10-25 2010-10-25 07:00:00 +2010-10-25 2010-10-25 07:00:00 +2010-10-25 2010-10-25 07:00:00 +2010-10-25 2010-10-25 07:00:00 +2010-10-25 2010-10-25 07:00:00 +2010-10-25 2010-10-25 07:00:00 +2010-10-25 2010-10-25 07:00:00 +2010-10-25 2010-10-25 07:00:00 +2010-10-26 2010-10-26 07:00:00 +2010-10-26 2010-10-26 07:00:00 +2010-10-26 2010-10-26 07:00:00 +2010-10-26 2010-10-26 07:00:00 +2010-10-26 2010-10-26 07:00:00 +2010-10-26 2010-10-26 07:00:00 +2010-10-26 2010-10-26 07:00:00 +2010-10-26 2010-10-26 07:00:00 +2010-10-27 2010-10-27 07:00:00 +2010-10-27 2010-10-27 07:00:00 +2010-10-27 2010-10-27 07:00:00 +2010-10-27 2010-10-27 07:00:00 +2010-10-27 2010-10-27 07:00:00 +2010-10-27 2010-10-27 07:00:00 +2010-10-27 2010-10-27 07:00:00 +2010-10-27 2010-10-27 07:00:00 +2010-10-28 2010-10-28 07:00:00 +2010-10-28 2010-10-28 07:00:00 +2010-10-28 2010-10-28 07:00:00 +2010-10-28 2010-10-28 07:00:00 +2010-10-28 2010-10-28 07:00:00 +2010-10-28 2010-10-28 07:00:00 +2010-10-28 2010-10-28 07:00:00 +2010-10-28 2010-10-28 07:00:00 +2010-10-29 2010-10-29 07:00:00 +2010-10-29 2010-10-29 07:00:00 +2010-10-29 2010-10-29 07:00:00 +2010-10-29 2010-10-29 07:00:00 +2010-10-29 2010-10-29 07:00:00 +2010-10-29 2010-10-29 07:00:00 +2010-10-29 2010-10-29 07:00:00 +2010-10-29 2010-10-29 07:00:00 +2010-10-30 2010-10-30 07:00:00 +2010-10-30 2010-10-30 07:00:00 +2010-10-30 2010-10-30 07:00:00 +2010-10-30 2010-10-30 07:00:00 +2010-10-30 2010-10-30 07:00:00 +2010-10-30 2010-10-30 07:00:00 +2010-10-30 2010-10-30 07:00:00 +2010-10-31 2010-10-31 07:00:00 +2010-10-31 2010-10-31 07:00:00 +2010-10-31 2010-10-31 07:00:00 +2010-10-31 2010-10-31 07:00:00 +2010-10-31 2010-10-31 07:00:00 +2010-10-31 2010-10-31 07:00:00 +2010-10-31 2010-10-31 07:00:00 +2010-10-30 2010-10-30 07:00:00 +2010-10-30 2010-10-30 07:00:00 +2010-10-29 2010-10-29 07:00:00 +2010-10-29 2010-10-29 07:00:00 +2010-10-29 2010-10-29 07:00:00 +2010-10-28 2010-10-28 07:00:00 +2010-10-28 
2010-10-28 07:00:00 +2010-10-28 2010-10-28 07:00:00 +2010-10-27 2010-10-27 07:00:00 +2010-10-27 2010-10-27 07:00:00 +2010-10-26 2010-10-26 07:00:00 +2010-10-26 2010-10-26 07:00:00 +2010-10-26 2010-10-26 07:00:00 +2010-10-26 2010-10-26 07:00:00 +2010-10-25 2010-10-25 07:00:00 +2010-10-25 2010-10-25 07:00:00 +2010-10-25 2010-10-25 07:00:00 +2010-10-24 2010-10-24 07:00:00 +2010-10-24 2010-10-24 07:00:00 +2010-10-24 2010-10-24 07:00:00 +2010-10-24 2010-10-24 07:00:00 +2010-10-23 2010-10-23 07:00:00 +2010-10-22 2010-10-22 07:00:00 +2010-10-22 2010-10-22 07:00:00 +2010-10-22 2010-10-22 07:00:00 +2010-10-21 2010-10-21 07:00:00 +2010-10-21 2010-10-21 07:00:00 +2010-10-21 2010-10-21 07:00:00 +2010-10-20 2010-10-20 07:00:00 +2010-10-20 2010-10-20 07:00:00 +2010-10-23 2010-10-23 07:00:00 +2010-10-23 2010-10-23 07:00:00 +2010-10-23 2010-10-23 07:00:00 +2010-10-30 2010-10-30 07:00:00 +2010-10-30 2010-10-30 07:00:00 +2010-10-20 2010-10-20 07:00:00 +2010-10-21 2010-10-21 07:00:00 +2010-10-23 2010-10-23 07:00:00 +2010-10-24 2010-10-24 07:00:00 +2010-10-25 2010-10-25 07:00:00 +2010-10-26 2010-10-26 07:00:00 +2010-10-27 2010-10-27 07:00:00 +2010-10-28 2010-10-28 07:00:00 +2010-10-29 2010-10-29 07:00:00 +2010-10-31 2010-10-31 07:00:00 +PREHOOK: query: EXPLAIN SELECT + to_unix_timestamp(fl_time), + year(fl_time), + month(fl_time), + day(fl_time), + dayofmonth(fl_time), + weekofyear(fl_time), + date(fl_time), + to_date(fl_time), + date_add(fl_time, 2), + date_sub(fl_time, 2), + datediff(fl_time, "2000-01-01") +FROM date_udf_flight_orc +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN SELECT + to_unix_timestamp(fl_time), + year(fl_time), + month(fl_time), + day(fl_time), + dayofmonth(fl_time), + weekofyear(fl_time), + date(fl_time), + to_date(fl_time), + date_add(fl_time, 2), + date_sub(fl_time, 2), + datediff(fl_time, "2000-01-01") +FROM date_udf_flight_orc +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + TableScan + alias: date_udf_flight_orc + Select Operator + expressions: to_unix_timestamp(fl_time) (type: bigint), year(fl_time) (type: int), month(fl_time) (type: int), day(fl_time) (type: int), dayofmonth(fl_time) (type: int), weekofyear(fl_time) (type: int), CAST( fl_time AS DATE) (type: date), to_date(fl_time) (type: string), date_add(fl_time, 2) (type: string), date_sub(fl_time, 2) (type: string), datediff(fl_time, '2000-01-01') (type: int) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10 + ListSink + +PREHOOK: query: SELECT + to_unix_timestamp(fl_time), + year(fl_time), + month(fl_time), + day(fl_time), + dayofmonth(fl_time), + weekofyear(fl_time), + date(fl_time), + to_date(fl_time), + date_add(fl_time, 2), + date_sub(fl_time, 2), + datediff(fl_time, "2000-01-01") +FROM date_udf_flight_orc +PREHOOK: type: QUERY +PREHOOK: Input: default@date_udf_flight_orc +#### A masked pattern was here #### +POSTHOOK: query: SELECT + to_unix_timestamp(fl_time), + year(fl_time), + month(fl_time), + day(fl_time), + dayofmonth(fl_time), + weekofyear(fl_time), + date(fl_time), + to_date(fl_time), + date_add(fl_time, 2), + date_sub(fl_time, 2), + datediff(fl_time, "2000-01-01") +FROM date_udf_flight_orc +POSTHOOK: type: QUERY +POSTHOOK: Input: default@date_udf_flight_orc +#### A masked pattern was here #### +1287583200 2010 10 20 20 42 2010-10-20 2010-10-20 2010-10-22 2010-10-18 3945 +1287583200 2010 10 20 20 42 2010-10-20 2010-10-20 2010-10-22 2010-10-18 3945 +1287583200 
2010 10 20 20 42 2010-10-20 2010-10-20 2010-10-22 2010-10-18 3945 +1287583200 2010 10 20 20 42 2010-10-20 2010-10-20 2010-10-22 2010-10-18 3945 +1287583200 2010 10 20 20 42 2010-10-20 2010-10-20 2010-10-22 2010-10-18 3945 +1287583200 2010 10 20 20 42 2010-10-20 2010-10-20 2010-10-22 2010-10-18 3945 +1287583200 2010 10 20 20 42 2010-10-20 2010-10-20 2010-10-22 2010-10-18 3945 +1287583200 2010 10 20 20 42 2010-10-20 2010-10-20 2010-10-22 2010-10-18 3945 +1287669600 2010 10 21 21 42 2010-10-21 2010-10-21 2010-10-23 2010-10-19 3946 +1287669600 2010 10 21 21 42 2010-10-21 2010-10-21 2010-10-23 2010-10-19 3946 +1287669600 2010 10 21 21 42 2010-10-21 2010-10-21 2010-10-23 2010-10-19 3946 +1287669600 2010 10 21 21 42 2010-10-21 2010-10-21 2010-10-23 2010-10-19 3946 +1287669600 2010 10 21 21 42 2010-10-21 2010-10-21 2010-10-23 2010-10-19 3946 +1287669600 2010 10 21 21 42 2010-10-21 2010-10-21 2010-10-23 2010-10-19 3946 +1287669600 2010 10 21 21 42 2010-10-21 2010-10-21 2010-10-23 2010-10-19 3946 +1287669600 2010 10 21 21 42 2010-10-21 2010-10-21 2010-10-23 2010-10-19 3946 +1287756000 2010 10 22 22 42 2010-10-22 2010-10-22 2010-10-24 2010-10-20 3947 +1287756000 2010 10 22 22 42 2010-10-22 2010-10-22 2010-10-24 2010-10-20 3947 +1287756000 2010 10 22 22 42 2010-10-22 2010-10-22 2010-10-24 2010-10-20 3947 +1287756000 2010 10 22 22 42 2010-10-22 2010-10-22 2010-10-24 2010-10-20 3947 +1287756000 2010 10 22 22 42 2010-10-22 2010-10-22 2010-10-24 2010-10-20 3947 +1287756000 2010 10 22 22 42 2010-10-22 2010-10-22 2010-10-24 2010-10-20 3947 +1287756000 2010 10 22 22 42 2010-10-22 2010-10-22 2010-10-24 2010-10-20 3947 +1287756000 2010 10 22 22 42 2010-10-22 2010-10-22 2010-10-24 2010-10-20 3947 +1287842400 2010 10 23 23 42 2010-10-23 2010-10-23 2010-10-25 2010-10-21 3948 +1287842400 2010 10 23 23 42 2010-10-23 2010-10-23 2010-10-25 2010-10-21 3948 +1287842400 2010 10 23 23 42 2010-10-23 2010-10-23 2010-10-25 2010-10-21 3948 +1287842400 2010 10 23 23 42 2010-10-23 2010-10-23 2010-10-25 2010-10-21 3948 +1287842400 2010 10 23 23 42 2010-10-23 2010-10-23 2010-10-25 2010-10-21 3948 +1287842400 2010 10 23 23 42 2010-10-23 2010-10-23 2010-10-25 2010-10-21 3948 +1287842400 2010 10 23 23 42 2010-10-23 2010-10-23 2010-10-25 2010-10-21 3948 +1287928800 2010 10 24 24 42 2010-10-24 2010-10-24 2010-10-26 2010-10-22 3949 +1287928800 2010 10 24 24 42 2010-10-24 2010-10-24 2010-10-26 2010-10-22 3949 +1287928800 2010 10 24 24 42 2010-10-24 2010-10-24 2010-10-26 2010-10-22 3949 +1287928800 2010 10 24 24 42 2010-10-24 2010-10-24 2010-10-26 2010-10-22 3949 +1287928800 2010 10 24 24 42 2010-10-24 2010-10-24 2010-10-26 2010-10-22 3949 +1287928800 2010 10 24 24 42 2010-10-24 2010-10-24 2010-10-26 2010-10-22 3949 +1287928800 2010 10 24 24 42 2010-10-24 2010-10-24 2010-10-26 2010-10-22 3949 +1288015200 2010 10 25 25 43 2010-10-25 2010-10-25 2010-10-27 2010-10-23 3950 +1288015200 2010 10 25 25 43 2010-10-25 2010-10-25 2010-10-27 2010-10-23 3950 +1288015200 2010 10 25 25 43 2010-10-25 2010-10-25 2010-10-27 2010-10-23 3950 +1288015200 2010 10 25 25 43 2010-10-25 2010-10-25 2010-10-27 2010-10-23 3950 +1288015200 2010 10 25 25 43 2010-10-25 2010-10-25 2010-10-27 2010-10-23 3950 +1288015200 2010 10 25 25 43 2010-10-25 2010-10-25 2010-10-27 2010-10-23 3950 +1288015200 2010 10 25 25 43 2010-10-25 2010-10-25 2010-10-27 2010-10-23 3950 +1288015200 2010 10 25 25 43 2010-10-25 2010-10-25 2010-10-27 2010-10-23 3950 +1288101600 2010 10 26 26 43 2010-10-26 2010-10-26 2010-10-28 2010-10-24 3951 +1288101600 2010 10 26 26 43 2010-10-26 2010-10-26 
2010-10-28 2010-10-24 3951 +1288101600 2010 10 26 26 43 2010-10-26 2010-10-26 2010-10-28 2010-10-24 3951 +1288101600 2010 10 26 26 43 2010-10-26 2010-10-26 2010-10-28 2010-10-24 3951 +1288101600 2010 10 26 26 43 2010-10-26 2010-10-26 2010-10-28 2010-10-24 3951 +1288101600 2010 10 26 26 43 2010-10-26 2010-10-26 2010-10-28 2010-10-24 3951 +1288101600 2010 10 26 26 43 2010-10-26 2010-10-26 2010-10-28 2010-10-24 3951 +1288101600 2010 10 26 26 43 2010-10-26 2010-10-26 2010-10-28 2010-10-24 3951 +1288188000 2010 10 27 27 43 2010-10-27 2010-10-27 2010-10-29 2010-10-25 3952 +1288188000 2010 10 27 27 43 2010-10-27 2010-10-27 2010-10-29 2010-10-25 3952 +1288188000 2010 10 27 27 43 2010-10-27 2010-10-27 2010-10-29 2010-10-25 3952 +1288188000 2010 10 27 27 43 2010-10-27 2010-10-27 2010-10-29 2010-10-25 3952 +1288188000 2010 10 27 27 43 2010-10-27 2010-10-27 2010-10-29 2010-10-25 3952 +1288188000 2010 10 27 27 43 2010-10-27 2010-10-27 2010-10-29 2010-10-25 3952 +1288188000 2010 10 27 27 43 2010-10-27 2010-10-27 2010-10-29 2010-10-25 3952 +1288188000 2010 10 27 27 43 2010-10-27 2010-10-27 2010-10-29 2010-10-25 3952 +1288274400 2010 10 28 28 43 2010-10-28 2010-10-28 2010-10-30 2010-10-26 3953 +1288274400 2010 10 28 28 43 2010-10-28 2010-10-28 2010-10-30 2010-10-26 3953 +1288274400 2010 10 28 28 43 2010-10-28 2010-10-28 2010-10-30 2010-10-26 3953 +1288274400 2010 10 28 28 43 2010-10-28 2010-10-28 2010-10-30 2010-10-26 3953 +1288274400 2010 10 28 28 43 2010-10-28 2010-10-28 2010-10-30 2010-10-26 3953 +1288274400 2010 10 28 28 43 2010-10-28 2010-10-28 2010-10-30 2010-10-26 3953 +1288274400 2010 10 28 28 43 2010-10-28 2010-10-28 2010-10-30 2010-10-26 3953 +1288274400 2010 10 28 28 43 2010-10-28 2010-10-28 2010-10-30 2010-10-26 3953 +1288360800 2010 10 29 29 43 2010-10-29 2010-10-29 2010-10-31 2010-10-27 3954 +1288360800 2010 10 29 29 43 2010-10-29 2010-10-29 2010-10-31 2010-10-27 3954 +1288360800 2010 10 29 29 43 2010-10-29 2010-10-29 2010-10-31 2010-10-27 3954 +1288360800 2010 10 29 29 43 2010-10-29 2010-10-29 2010-10-31 2010-10-27 3954 +1288360800 2010 10 29 29 43 2010-10-29 2010-10-29 2010-10-31 2010-10-27 3954 +1288360800 2010 10 29 29 43 2010-10-29 2010-10-29 2010-10-31 2010-10-27 3954 +1288360800 2010 10 29 29 43 2010-10-29 2010-10-29 2010-10-31 2010-10-27 3954 +1288360800 2010 10 29 29 43 2010-10-29 2010-10-29 2010-10-31 2010-10-27 3954 +1288447200 2010 10 30 30 43 2010-10-30 2010-10-30 2010-11-01 2010-10-28 3955 +1288447200 2010 10 30 30 43 2010-10-30 2010-10-30 2010-11-01 2010-10-28 3955 +1288447200 2010 10 30 30 43 2010-10-30 2010-10-30 2010-11-01 2010-10-28 3955 +1288447200 2010 10 30 30 43 2010-10-30 2010-10-30 2010-11-01 2010-10-28 3955 +1288447200 2010 10 30 30 43 2010-10-30 2010-10-30 2010-11-01 2010-10-28 3955 +1288447200 2010 10 30 30 43 2010-10-30 2010-10-30 2010-11-01 2010-10-28 3955 +1288447200 2010 10 30 30 43 2010-10-30 2010-10-30 2010-11-01 2010-10-28 3955 +1288533600 2010 10 31 31 43 2010-10-31 2010-10-31 2010-11-02 2010-10-29 3956 +1288533600 2010 10 31 31 43 2010-10-31 2010-10-31 2010-11-02 2010-10-29 3956 +1288533600 2010 10 31 31 43 2010-10-31 2010-10-31 2010-11-02 2010-10-29 3956 +1288533600 2010 10 31 31 43 2010-10-31 2010-10-31 2010-11-02 2010-10-29 3956 +1288533600 2010 10 31 31 43 2010-10-31 2010-10-31 2010-11-02 2010-10-29 3956 +1288533600 2010 10 31 31 43 2010-10-31 2010-10-31 2010-11-02 2010-10-29 3956 +1288533600 2010 10 31 31 43 2010-10-31 2010-10-31 2010-11-02 2010-10-29 3956 +1288447200 2010 10 30 30 43 2010-10-30 2010-10-30 2010-11-01 2010-10-28 3955 +1288447200 2010 
10 30 30 43 2010-10-30 2010-10-30 2010-11-01 2010-10-28 3955 +1288360800 2010 10 29 29 43 2010-10-29 2010-10-29 2010-10-31 2010-10-27 3954 +1288360800 2010 10 29 29 43 2010-10-29 2010-10-29 2010-10-31 2010-10-27 3954 +1288360800 2010 10 29 29 43 2010-10-29 2010-10-29 2010-10-31 2010-10-27 3954 +1288274400 2010 10 28 28 43 2010-10-28 2010-10-28 2010-10-30 2010-10-26 3953 +1288274400 2010 10 28 28 43 2010-10-28 2010-10-28 2010-10-30 2010-10-26 3953 +1288274400 2010 10 28 28 43 2010-10-28 2010-10-28 2010-10-30 2010-10-26 3953 +1288188000 2010 10 27 27 43 2010-10-27 2010-10-27 2010-10-29 2010-10-25 3952 +1288188000 2010 10 27 27 43 2010-10-27 2010-10-27 2010-10-29 2010-10-25 3952 +1288101600 2010 10 26 26 43 2010-10-26 2010-10-26 2010-10-28 2010-10-24 3951 +1288101600 2010 10 26 26 43 2010-10-26 2010-10-26 2010-10-28 2010-10-24 3951 +1288101600 2010 10 26 26 43 2010-10-26 2010-10-26 2010-10-28 2010-10-24 3951 +1288101600 2010 10 26 26 43 2010-10-26 2010-10-26 2010-10-28 2010-10-24 3951 +1288015200 2010 10 25 25 43 2010-10-25 2010-10-25 2010-10-27 2010-10-23 3950 +1288015200 2010 10 25 25 43 2010-10-25 2010-10-25 2010-10-27 2010-10-23 3950 +1288015200 2010 10 25 25 43 2010-10-25 2010-10-25 2010-10-27 2010-10-23 3950 +1287928800 2010 10 24 24 42 2010-10-24 2010-10-24 2010-10-26 2010-10-22 3949 +1287928800 2010 10 24 24 42 2010-10-24 2010-10-24 2010-10-26 2010-10-22 3949 +1287928800 2010 10 24 24 42 2010-10-24 2010-10-24 2010-10-26 2010-10-22 3949 +1287928800 2010 10 24 24 42 2010-10-24 2010-10-24 2010-10-26 2010-10-22 3949 +1287842400 2010 10 23 23 42 2010-10-23 2010-10-23 2010-10-25 2010-10-21 3948 +1287756000 2010 10 22 22 42 2010-10-22 2010-10-22 2010-10-24 2010-10-20 3947 +1287756000 2010 10 22 22 42 2010-10-22 2010-10-22 2010-10-24 2010-10-20 3947 +1287756000 2010 10 22 22 42 2010-10-22 2010-10-22 2010-10-24 2010-10-20 3947 +1287669600 2010 10 21 21 42 2010-10-21 2010-10-21 2010-10-23 2010-10-19 3946 +1287669600 2010 10 21 21 42 2010-10-21 2010-10-21 2010-10-23 2010-10-19 3946 +1287669600 2010 10 21 21 42 2010-10-21 2010-10-21 2010-10-23 2010-10-19 3946 +1287583200 2010 10 20 20 42 2010-10-20 2010-10-20 2010-10-22 2010-10-18 3945 +1287583200 2010 10 20 20 42 2010-10-20 2010-10-20 2010-10-22 2010-10-18 3945 +1287842400 2010 10 23 23 42 2010-10-23 2010-10-23 2010-10-25 2010-10-21 3948 +1287842400 2010 10 23 23 42 2010-10-23 2010-10-23 2010-10-25 2010-10-21 3948 +1287842400 2010 10 23 23 42 2010-10-23 2010-10-23 2010-10-25 2010-10-21 3948 +1288447200 2010 10 30 30 43 2010-10-30 2010-10-30 2010-11-01 2010-10-28 3955 +1288447200 2010 10 30 30 43 2010-10-30 2010-10-30 2010-11-01 2010-10-28 3955 +1287583200 2010 10 20 20 42 2010-10-20 2010-10-20 2010-10-22 2010-10-18 3945 +1287669600 2010 10 21 21 42 2010-10-21 2010-10-21 2010-10-23 2010-10-19 3946 +1287842400 2010 10 23 23 42 2010-10-23 2010-10-23 2010-10-25 2010-10-21 3948 +1287928800 2010 10 24 24 42 2010-10-24 2010-10-24 2010-10-26 2010-10-22 3949 +1288015200 2010 10 25 25 43 2010-10-25 2010-10-25 2010-10-27 2010-10-23 3950 +1288101600 2010 10 26 26 43 2010-10-26 2010-10-26 2010-10-28 2010-10-24 3951 +1288188000 2010 10 27 27 43 2010-10-27 2010-10-27 2010-10-29 2010-10-25 3952 +1288274400 2010 10 28 28 43 2010-10-28 2010-10-28 2010-10-30 2010-10-26 3953 +1288360800 2010 10 29 29 43 2010-10-29 2010-10-29 2010-10-31 2010-10-27 3954 +1288533600 2010 10 31 31 43 2010-10-31 2010-10-31 2010-11-02 2010-10-29 3956 +PREHOOK: query: EXPLAIN SELECT + to_unix_timestamp(fl_date), + year(fl_date), + month(fl_date), + day(fl_date), + dayofmonth(fl_date), + 
weekofyear(fl_date), + date(fl_date), + to_date(fl_date), + date_add(fl_date, 2), + date_sub(fl_date, 2), + datediff(fl_date, "2000-01-01") +FROM date_udf_flight_orc +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN SELECT + to_unix_timestamp(fl_date), + year(fl_date), + month(fl_date), + day(fl_date), + dayofmonth(fl_date), + weekofyear(fl_date), + date(fl_date), + to_date(fl_date), + date_add(fl_date, 2), + date_sub(fl_date, 2), + datediff(fl_date, "2000-01-01") +FROM date_udf_flight_orc +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + TableScan + alias: date_udf_flight_orc + Select Operator + expressions: to_unix_timestamp(fl_date) (type: bigint), year(fl_date) (type: int), month(fl_date) (type: int), day(fl_date) (type: int), dayofmonth(fl_date) (type: int), weekofyear(fl_date) (type: int), CAST( fl_date AS DATE) (type: date), to_date(fl_date) (type: string), date_add(fl_date, 2) (type: string), date_sub(fl_date, 2) (type: string), datediff(fl_date, '2000-01-01') (type: int) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10 + ListSink + +PREHOOK: query: SELECT + to_unix_timestamp(fl_date), + year(fl_date), + month(fl_date), + day(fl_date), + dayofmonth(fl_date), + weekofyear(fl_date), + date(fl_date), + to_date(fl_date), + date_add(fl_date, 2), + date_sub(fl_date, 2), + datediff(fl_date, "2000-01-01") +FROM date_udf_flight_orc +PREHOOK: type: QUERY +PREHOOK: Input: default@date_udf_flight_orc +#### A masked pattern was here #### +POSTHOOK: query: SELECT + to_unix_timestamp(fl_date), + year(fl_date), + month(fl_date), + day(fl_date), + dayofmonth(fl_date), + weekofyear(fl_date), + date(fl_date), + to_date(fl_date), + date_add(fl_date, 2), + date_sub(fl_date, 2), + datediff(fl_date, "2000-01-01") +FROM date_udf_flight_orc +POSTHOOK: type: QUERY +POSTHOOK: Input: default@date_udf_flight_orc +#### A masked pattern was here #### +1287558000 2010 10 20 20 42 2010-10-20 2010-10-20 2010-10-22 2010-10-18 3945 +1287558000 2010 10 20 20 42 2010-10-20 2010-10-20 2010-10-22 2010-10-18 3945 +1287558000 2010 10 20 20 42 2010-10-20 2010-10-20 2010-10-22 2010-10-18 3945 +1287558000 2010 10 20 20 42 2010-10-20 2010-10-20 2010-10-22 2010-10-18 3945 +1287558000 2010 10 20 20 42 2010-10-20 2010-10-20 2010-10-22 2010-10-18 3945 +1287558000 2010 10 20 20 42 2010-10-20 2010-10-20 2010-10-22 2010-10-18 3945 +1287558000 2010 10 20 20 42 2010-10-20 2010-10-20 2010-10-22 2010-10-18 3945 +1287558000 2010 10 20 20 42 2010-10-20 2010-10-20 2010-10-22 2010-10-18 3945 +1287644400 2010 10 21 21 42 2010-10-21 2010-10-21 2010-10-23 2010-10-19 3946 +1287644400 2010 10 21 21 42 2010-10-21 2010-10-21 2010-10-23 2010-10-19 3946 +1287644400 2010 10 21 21 42 2010-10-21 2010-10-21 2010-10-23 2010-10-19 3946 +1287644400 2010 10 21 21 42 2010-10-21 2010-10-21 2010-10-23 2010-10-19 3946 +1287644400 2010 10 21 21 42 2010-10-21 2010-10-21 2010-10-23 2010-10-19 3946 +1287644400 2010 10 21 21 42 2010-10-21 2010-10-21 2010-10-23 2010-10-19 3946 +1287644400 2010 10 21 21 42 2010-10-21 2010-10-21 2010-10-23 2010-10-19 3946 +1287644400 2010 10 21 21 42 2010-10-21 2010-10-21 2010-10-23 2010-10-19 3946 +1287730800 2010 10 22 22 42 2010-10-22 2010-10-22 2010-10-24 2010-10-20 3947 +1287730800 2010 10 22 22 42 2010-10-22 2010-10-22 2010-10-24 2010-10-20 3947 +1287730800 2010 10 22 22 42 2010-10-22 2010-10-22 2010-10-24 2010-10-20 3947 +1287730800 2010 10 22 22 42 2010-10-22 2010-10-22 2010-10-24 
2010-10-20 3947 +1287730800 2010 10 22 22 42 2010-10-22 2010-10-22 2010-10-24 2010-10-20 3947 +1287730800 2010 10 22 22 42 2010-10-22 2010-10-22 2010-10-24 2010-10-20 3947 +1287730800 2010 10 22 22 42 2010-10-22 2010-10-22 2010-10-24 2010-10-20 3947 +1287730800 2010 10 22 22 42 2010-10-22 2010-10-22 2010-10-24 2010-10-20 3947 +1287817200 2010 10 23 23 42 2010-10-23 2010-10-23 2010-10-25 2010-10-21 3948 +1287817200 2010 10 23 23 42 2010-10-23 2010-10-23 2010-10-25 2010-10-21 3948 +1287817200 2010 10 23 23 42 2010-10-23 2010-10-23 2010-10-25 2010-10-21 3948 +1287817200 2010 10 23 23 42 2010-10-23 2010-10-23 2010-10-25 2010-10-21 3948 +1287817200 2010 10 23 23 42 2010-10-23 2010-10-23 2010-10-25 2010-10-21 3948 +1287817200 2010 10 23 23 42 2010-10-23 2010-10-23 2010-10-25 2010-10-21 3948 +1287817200 2010 10 23 23 42 2010-10-23 2010-10-23 2010-10-25 2010-10-21 3948 +1287903600 2010 10 24 24 42 2010-10-24 2010-10-24 2010-10-26 2010-10-22 3949 +1287903600 2010 10 24 24 42 2010-10-24 2010-10-24 2010-10-26 2010-10-22 3949 +1287903600 2010 10 24 24 42 2010-10-24 2010-10-24 2010-10-26 2010-10-22 3949 +1287903600 2010 10 24 24 42 2010-10-24 2010-10-24 2010-10-26 2010-10-22 3949 +1287903600 2010 10 24 24 42 2010-10-24 2010-10-24 2010-10-26 2010-10-22 3949 +1287903600 2010 10 24 24 42 2010-10-24 2010-10-24 2010-10-26 2010-10-22 3949 +1287903600 2010 10 24 24 42 2010-10-24 2010-10-24 2010-10-26 2010-10-22 3949 +1287990000 2010 10 25 25 43 2010-10-25 2010-10-25 2010-10-27 2010-10-23 3950 +1287990000 2010 10 25 25 43 2010-10-25 2010-10-25 2010-10-27 2010-10-23 3950 +1287990000 2010 10 25 25 43 2010-10-25 2010-10-25 2010-10-27 2010-10-23 3950 +1287990000 2010 10 25 25 43 2010-10-25 2010-10-25 2010-10-27 2010-10-23 3950 +1287990000 2010 10 25 25 43 2010-10-25 2010-10-25 2010-10-27 2010-10-23 3950 +1287990000 2010 10 25 25 43 2010-10-25 2010-10-25 2010-10-27 2010-10-23 3950 +1287990000 2010 10 25 25 43 2010-10-25 2010-10-25 2010-10-27 2010-10-23 3950 +1287990000 2010 10 25 25 43 2010-10-25 2010-10-25 2010-10-27 2010-10-23 3950 +1288076400 2010 10 26 26 43 2010-10-26 2010-10-26 2010-10-28 2010-10-24 3951 +1288076400 2010 10 26 26 43 2010-10-26 2010-10-26 2010-10-28 2010-10-24 3951 +1288076400 2010 10 26 26 43 2010-10-26 2010-10-26 2010-10-28 2010-10-24 3951 +1288076400 2010 10 26 26 43 2010-10-26 2010-10-26 2010-10-28 2010-10-24 3951 +1288076400 2010 10 26 26 43 2010-10-26 2010-10-26 2010-10-28 2010-10-24 3951 +1288076400 2010 10 26 26 43 2010-10-26 2010-10-26 2010-10-28 2010-10-24 3951 +1288076400 2010 10 26 26 43 2010-10-26 2010-10-26 2010-10-28 2010-10-24 3951 +1288076400 2010 10 26 26 43 2010-10-26 2010-10-26 2010-10-28 2010-10-24 3951 +1288162800 2010 10 27 27 43 2010-10-27 2010-10-27 2010-10-29 2010-10-25 3952 +1288162800 2010 10 27 27 43 2010-10-27 2010-10-27 2010-10-29 2010-10-25 3952 +1288162800 2010 10 27 27 43 2010-10-27 2010-10-27 2010-10-29 2010-10-25 3952 +1288162800 2010 10 27 27 43 2010-10-27 2010-10-27 2010-10-29 2010-10-25 3952 +1288162800 2010 10 27 27 43 2010-10-27 2010-10-27 2010-10-29 2010-10-25 3952 +1288162800 2010 10 27 27 43 2010-10-27 2010-10-27 2010-10-29 2010-10-25 3952 +1288162800 2010 10 27 27 43 2010-10-27 2010-10-27 2010-10-29 2010-10-25 3952 +1288162800 2010 10 27 27 43 2010-10-27 2010-10-27 2010-10-29 2010-10-25 3952 +1288249200 2010 10 28 28 43 2010-10-28 2010-10-28 2010-10-30 2010-10-26 3953 +1288249200 2010 10 28 28 43 2010-10-28 2010-10-28 2010-10-30 2010-10-26 3953 +1288249200 2010 10 28 28 43 2010-10-28 2010-10-28 2010-10-30 2010-10-26 3953 +1288249200 2010 10 28 28 43 
2010-10-28 2010-10-28 2010-10-30 2010-10-26 3953 +1288249200 2010 10 28 28 43 2010-10-28 2010-10-28 2010-10-30 2010-10-26 3953 +1288249200 2010 10 28 28 43 2010-10-28 2010-10-28 2010-10-30 2010-10-26 3953 +1288249200 2010 10 28 28 43 2010-10-28 2010-10-28 2010-10-30 2010-10-26 3953 +1288249200 2010 10 28 28 43 2010-10-28 2010-10-28 2010-10-30 2010-10-26 3953 +1288335600 2010 10 29 29 43 2010-10-29 2010-10-29 2010-10-31 2010-10-27 3954 +1288335600 2010 10 29 29 43 2010-10-29 2010-10-29 2010-10-31 2010-10-27 3954 +1288335600 2010 10 29 29 43 2010-10-29 2010-10-29 2010-10-31 2010-10-27 3954 +1288335600 2010 10 29 29 43 2010-10-29 2010-10-29 2010-10-31 2010-10-27 3954 +1288335600 2010 10 29 29 43 2010-10-29 2010-10-29 2010-10-31 2010-10-27 3954 +1288335600 2010 10 29 29 43 2010-10-29 2010-10-29 2010-10-31 2010-10-27 3954 +1288335600 2010 10 29 29 43 2010-10-29 2010-10-29 2010-10-31 2010-10-27 3954 +1288335600 2010 10 29 29 43 2010-10-29 2010-10-29 2010-10-31 2010-10-27 3954 +1288422000 2010 10 30 30 43 2010-10-30 2010-10-30 2010-11-01 2010-10-28 3955 +1288422000 2010 10 30 30 43 2010-10-30 2010-10-30 2010-11-01 2010-10-28 3955 +1288422000 2010 10 30 30 43 2010-10-30 2010-10-30 2010-11-01 2010-10-28 3955 +1288422000 2010 10 30 30 43 2010-10-30 2010-10-30 2010-11-01 2010-10-28 3955 +1288422000 2010 10 30 30 43 2010-10-30 2010-10-30 2010-11-01 2010-10-28 3955 +1288422000 2010 10 30 30 43 2010-10-30 2010-10-30 2010-11-01 2010-10-28 3955 +1288422000 2010 10 30 30 43 2010-10-30 2010-10-30 2010-11-01 2010-10-28 3955 +1288508400 2010 10 31 31 43 2010-10-31 2010-10-31 2010-11-02 2010-10-29 3956 +1288508400 2010 10 31 31 43 2010-10-31 2010-10-31 2010-11-02 2010-10-29 3956 +1288508400 2010 10 31 31 43 2010-10-31 2010-10-31 2010-11-02 2010-10-29 3956 +1288508400 2010 10 31 31 43 2010-10-31 2010-10-31 2010-11-02 2010-10-29 3956 +1288508400 2010 10 31 31 43 2010-10-31 2010-10-31 2010-11-02 2010-10-29 3956 +1288508400 2010 10 31 31 43 2010-10-31 2010-10-31 2010-11-02 2010-10-29 3956 +1288508400 2010 10 31 31 43 2010-10-31 2010-10-31 2010-11-02 2010-10-29 3956 +1288422000 2010 10 30 30 43 2010-10-30 2010-10-30 2010-11-01 2010-10-28 3955 +1288422000 2010 10 30 30 43 2010-10-30 2010-10-30 2010-11-01 2010-10-28 3955 +1288335600 2010 10 29 29 43 2010-10-29 2010-10-29 2010-10-31 2010-10-27 3954 +1288335600 2010 10 29 29 43 2010-10-29 2010-10-29 2010-10-31 2010-10-27 3954 +1288335600 2010 10 29 29 43 2010-10-29 2010-10-29 2010-10-31 2010-10-27 3954 +1288249200 2010 10 28 28 43 2010-10-28 2010-10-28 2010-10-30 2010-10-26 3953 +1288249200 2010 10 28 28 43 2010-10-28 2010-10-28 2010-10-30 2010-10-26 3953 +1288249200 2010 10 28 28 43 2010-10-28 2010-10-28 2010-10-30 2010-10-26 3953 +1288162800 2010 10 27 27 43 2010-10-27 2010-10-27 2010-10-29 2010-10-25 3952 +1288162800 2010 10 27 27 43 2010-10-27 2010-10-27 2010-10-29 2010-10-25 3952 +1288076400 2010 10 26 26 43 2010-10-26 2010-10-26 2010-10-28 2010-10-24 3951 +1288076400 2010 10 26 26 43 2010-10-26 2010-10-26 2010-10-28 2010-10-24 3951 +1288076400 2010 10 26 26 43 2010-10-26 2010-10-26 2010-10-28 2010-10-24 3951 +1288076400 2010 10 26 26 43 2010-10-26 2010-10-26 2010-10-28 2010-10-24 3951 +1287990000 2010 10 25 25 43 2010-10-25 2010-10-25 2010-10-27 2010-10-23 3950 +1287990000 2010 10 25 25 43 2010-10-25 2010-10-25 2010-10-27 2010-10-23 3950 +1287990000 2010 10 25 25 43 2010-10-25 2010-10-25 2010-10-27 2010-10-23 3950 +1287903600 2010 10 24 24 42 2010-10-24 2010-10-24 2010-10-26 2010-10-22 3949 +1287903600 2010 10 24 24 42 2010-10-24 2010-10-24 2010-10-26 2010-10-22 
3949 +1287903600 2010 10 24 24 42 2010-10-24 2010-10-24 2010-10-26 2010-10-22 3949 +1287903600 2010 10 24 24 42 2010-10-24 2010-10-24 2010-10-26 2010-10-22 3949 +1287817200 2010 10 23 23 42 2010-10-23 2010-10-23 2010-10-25 2010-10-21 3948 +1287730800 2010 10 22 22 42 2010-10-22 2010-10-22 2010-10-24 2010-10-20 3947 +1287730800 2010 10 22 22 42 2010-10-22 2010-10-22 2010-10-24 2010-10-20 3947 +1287730800 2010 10 22 22 42 2010-10-22 2010-10-22 2010-10-24 2010-10-20 3947 +1287644400 2010 10 21 21 42 2010-10-21 2010-10-21 2010-10-23 2010-10-19 3946 +1287644400 2010 10 21 21 42 2010-10-21 2010-10-21 2010-10-23 2010-10-19 3946 +1287644400 2010 10 21 21 42 2010-10-21 2010-10-21 2010-10-23 2010-10-19 3946 +1287558000 2010 10 20 20 42 2010-10-20 2010-10-20 2010-10-22 2010-10-18 3945 +1287558000 2010 10 20 20 42 2010-10-20 2010-10-20 2010-10-22 2010-10-18 3945 +1287817200 2010 10 23 23 42 2010-10-23 2010-10-23 2010-10-25 2010-10-21 3948 +1287817200 2010 10 23 23 42 2010-10-23 2010-10-23 2010-10-25 2010-10-21 3948 +1287817200 2010 10 23 23 42 2010-10-23 2010-10-23 2010-10-25 2010-10-21 3948 +1288422000 2010 10 30 30 43 2010-10-30 2010-10-30 2010-11-01 2010-10-28 3955 +1288422000 2010 10 30 30 43 2010-10-30 2010-10-30 2010-11-01 2010-10-28 3955 +1287558000 2010 10 20 20 42 2010-10-20 2010-10-20 2010-10-22 2010-10-18 3945 +1287644400 2010 10 21 21 42 2010-10-21 2010-10-21 2010-10-23 2010-10-19 3946 +1287817200 2010 10 23 23 42 2010-10-23 2010-10-23 2010-10-25 2010-10-21 3948 +1287903600 2010 10 24 24 42 2010-10-24 2010-10-24 2010-10-26 2010-10-22 3949 +1287990000 2010 10 25 25 43 2010-10-25 2010-10-25 2010-10-27 2010-10-23 3950 +1288076400 2010 10 26 26 43 2010-10-26 2010-10-26 2010-10-28 2010-10-24 3951 +1288162800 2010 10 27 27 43 2010-10-27 2010-10-27 2010-10-29 2010-10-25 3952 +1288249200 2010 10 28 28 43 2010-10-28 2010-10-28 2010-10-30 2010-10-26 3953 +1288335600 2010 10 29 29 43 2010-10-29 2010-10-29 2010-10-31 2010-10-27 3954 +1288508400 2010 10 31 31 43 2010-10-31 2010-10-31 2010-11-02 2010-10-29 3956 +PREHOOK: query: EXPLAIN SELECT + year(fl_time) = year(fl_date), + month(fl_time) = month(fl_date), + day(fl_time) = day(fl_date), + dayofmonth(fl_time) = dayofmonth(fl_date), + weekofyear(fl_time) = weekofyear(fl_date), + date(fl_time) = date(fl_date), + to_date(fl_time) = to_date(fl_date), + date_add(fl_time, 2) = date_add(fl_date, 2), + date_sub(fl_time, 2) = date_sub(fl_date, 2), + datediff(fl_time, "2000-01-01") = datediff(fl_date, "2000-01-01") +FROM date_udf_flight_orc +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN SELECT + year(fl_time) = year(fl_date), + month(fl_time) = month(fl_date), + day(fl_time) = day(fl_date), + dayofmonth(fl_time) = dayofmonth(fl_date), + weekofyear(fl_time) = weekofyear(fl_date), + date(fl_time) = date(fl_date), + to_date(fl_time) = to_date(fl_date), + date_add(fl_time, 2) = date_add(fl_date, 2), + date_sub(fl_time, 2) = date_sub(fl_date, 2), + datediff(fl_time, "2000-01-01") = datediff(fl_date, "2000-01-01") +FROM date_udf_flight_orc +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + TableScan + alias: date_udf_flight_orc + Select Operator + expressions: (year(fl_time) = year(fl_date)) (type: boolean), (month(fl_time) = month(fl_date)) (type: boolean), (day(fl_time) = day(fl_date)) (type: boolean), (dayofmonth(fl_time) = dayofmonth(fl_date)) (type: boolean), (weekofyear(fl_time) = weekofyear(fl_date)) (type: boolean), (CAST( fl_time AS DATE) = CAST( fl_date AS 
DATE)) (type: boolean), (to_date(fl_time) = to_date(fl_date)) (type: boolean), (date_add(fl_time, 2) = date_add(fl_date, 2)) (type: boolean), (date_sub(fl_time, 2) = date_sub(fl_date, 2)) (type: boolean), (datediff(fl_time, '2000-01-01') = datediff(fl_date, '2000-01-01')) (type: boolean) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9 + ListSink + +PREHOOK: query: -- Should all be true or NULL +SELECT + year(fl_time) = year(fl_date), + month(fl_time) = month(fl_date), + day(fl_time) = day(fl_date), + dayofmonth(fl_time) = dayofmonth(fl_date), + weekofyear(fl_time) = weekofyear(fl_date), + date(fl_time) = date(fl_date), + to_date(fl_time) = to_date(fl_date), + date_add(fl_time, 2) = date_add(fl_date, 2), + date_sub(fl_time, 2) = date_sub(fl_date, 2), + datediff(fl_time, "2000-01-01") = datediff(fl_date, "2000-01-01") +FROM date_udf_flight_orc +PREHOOK: type: QUERY +PREHOOK: Input: default@date_udf_flight_orc +#### A masked pattern was here #### +POSTHOOK: query: -- Should all be true or NULL +SELECT + year(fl_time) = year(fl_date), + month(fl_time) = month(fl_date), + day(fl_time) = day(fl_date), + dayofmonth(fl_time) = dayofmonth(fl_date), + weekofyear(fl_time) = weekofyear(fl_date), + date(fl_time) = date(fl_date), + to_date(fl_time) = to_date(fl_date), + date_add(fl_time, 2) = date_add(fl_date, 2), + date_sub(fl_time, 2) = date_sub(fl_date, 2), + datediff(fl_time, "2000-01-01") = datediff(fl_date, "2000-01-01") +FROM date_udf_flight_orc +POSTHOOK: type: QUERY +POSTHOOK: Input: default@date_udf_flight_orc +#### A masked pattern was here #### +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true 
true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true 
true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +true true true true true true true true true true +PREHOOK: query: EXPLAIN SELECT + fl_date, + to_date(date_add(fl_date, 2)), + to_date(date_sub(fl_date, 2)), + datediff(fl_date, date_add(fl_date, 2)), + datediff(fl_date, date_sub(fl_date, 2)), + datediff(date_add(fl_date, 2), date_sub(fl_date, 2)) +FROM date_udf_flight_orc LIMIT 10 +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN SELECT + fl_date, + to_date(date_add(fl_date, 2)), + to_date(date_sub(fl_date, 2)), + datediff(fl_date, date_add(fl_date, 2)), + datediff(fl_date, date_sub(fl_date, 2)), + datediff(date_add(fl_date, 2), date_sub(fl_date, 2)) +FROM date_udf_flight_orc LIMIT 10 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: 10 + Processor Tree: + TableScan + alias: date_udf_flight_orc + Select Operator + expressions: fl_date (type: date), to_date(date_add(fl_date, 2)) (type: string), to_date(date_sub(fl_date, 2)) (type: string), datediff(fl_date, date_add(fl_date, 2)) (type: int), datediff(fl_date, date_sub(fl_date, 2)) (type: int), datediff(date_add(fl_date, 2), date_sub(fl_date, 2)) (type: int) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 + Limit + Number of rows: 10 + ListSink + +PREHOOK: query: SELECT + fl_date, + to_date(date_add(fl_date, 2)), + to_date(date_sub(fl_date, 2)), + datediff(fl_date, date_add(fl_date, 2)), + datediff(fl_date, date_sub(fl_date, 2)), + datediff(date_add(fl_date, 2), date_sub(fl_date, 2)) +FROM date_udf_flight_orc LIMIT 10 +PREHOOK: type: QUERY +PREHOOK: Input: default@date_udf_flight_orc +#### A masked pattern was here #### +POSTHOOK: query: SELECT + fl_date, + to_date(date_add(fl_date, 2)), + to_date(date_sub(fl_date, 2)), + datediff(fl_date, date_add(fl_date, 2)), + datediff(fl_date, date_sub(fl_date, 2)), + datediff(date_add(fl_date, 2), date_sub(fl_date, 2)) +FROM date_udf_flight_orc LIMIT 10 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@date_udf_flight_orc +#### A masked pattern was here #### +2010-10-20 2010-10-22 2010-10-18 -1 2 4 +2010-10-20 2010-10-22 2010-10-18 -1 2 4 +2010-10-20 
2010-10-22 2010-10-18 -1 2 4 +2010-10-20 2010-10-22 2010-10-18 -1 2 4 +2010-10-20 2010-10-22 2010-10-18 -1 2 4 +2010-10-20 2010-10-22 2010-10-18 -1 2 4 +2010-10-20 2010-10-22 2010-10-18 -1 2 4 +2010-10-20 2010-10-22 2010-10-18 -1 2 4 +2010-10-21 2010-10-23 2010-10-19 -1 2 4 +2010-10-21 2010-10-23 2010-10-19 -1 2 4 +PREHOOK: query: -- Test extracting the date part of expression that includes time +SELECT to_date('2009-07-30 04:17:52') FROM date_udf_flight_orc LIMIT 1 +PREHOOK: type: QUERY +PREHOOK: Input: default@date_udf_flight_orc +#### A masked pattern was here #### +POSTHOOK: query: -- Test extracting the date part of expression that includes time +SELECT to_date('2009-07-30 04:17:52') FROM date_udf_flight_orc LIMIT 1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@date_udf_flight_orc +#### A masked pattern was here #### +2009-07-30 +PREHOOK: query: EXPLAIN SELECT + min(fl_date) AS c1, + max(fl_date), + count(fl_date), + count(*) +FROM date_udf_flight_orc +ORDER BY c1 +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN SELECT + min(fl_date) AS c1, + max(fl_date), + count(fl_date), + count(*) +FROM date_udf_flight_orc +ORDER BY c1 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE) + Reducer 3 <- Reducer 2 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: date_udf_flight_orc + Statistics: Num rows: 137 Data size: 13152 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: fl_date (type: date) + outputColumnNames: fl_date + Statistics: Num rows: 137 Data size: 13152 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: min(fl_date), max(fl_date), count(fl_date), count() + mode: hash + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 1 Data size: 128 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 128 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: date), _col1 (type: date), _col2 (type: bigint), _col3 (type: bigint) + Execution mode: vectorized + Reducer 2 + Reduce Operator Tree: + Group By Operator + aggregations: min(VALUE._col0), max(VALUE._col1), count(VALUE._col2), count(VALUE._col3) + mode: mergepartial + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 1 Data size: 128 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: date), _col1 (type: date), _col2 (type: bigint), _col3 (type: bigint) + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 1 Data size: 128 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: date) + sort order: + + Statistics: Num rows: 1 Data size: 128 Basic stats: COMPLETE Column stats: NONE + value expressions: _col1 (type: date), _col2 (type: bigint), _col3 (type: bigint) + Execution mode: vectorized + Reducer 3 + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: date), VALUE._col0 (type: date), VALUE._col1 (type: bigint), VALUE._col2 (type: bigint) + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 1 Data size: 128 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 128 Basic stats: COMPLETE Column stats: NONE + table: + input format: 
org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT + min(fl_date) AS c1, + max(fl_date), + count(fl_date), + count(*) +FROM date_udf_flight_orc +ORDER BY c1 +PREHOOK: type: QUERY +PREHOOK: Input: default@date_udf_flight_orc +#### A masked pattern was here #### +POSTHOOK: query: SELECT + min(fl_date) AS c1, + max(fl_date), + count(fl_date), + count(*) +FROM date_udf_flight_orc +ORDER BY c1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@date_udf_flight_orc +#### A masked pattern was here #### +2010-10-20 2010-10-31 137 137 Index: ql/src/test/results/clientpositive/tez/vectorized_distinct_gby.q.out =================================================================== --- ql/src/test/results/clientpositive/tez/vectorized_distinct_gby.q.out (revision 0) +++ ql/src/test/results/clientpositive/tez/vectorized_distinct_gby.q.out (working copy) @@ -0,0 +1,157 @@ +PREHOOK: query: create table dtest(a int, b int) clustered by (a) sorted by (a) into 1 buckets stored as orc +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@dtest +POSTHOOK: query: create table dtest(a int, b int) clustered by (a) sorted by (a) into 1 buckets stored as orc +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@dtest +PREHOOK: query: insert into table dtest select c,b from (select array(300,300,300,300,300) as a, 1 as b from src limit 1) y lateral view explode(a) t1 as c +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@dtest +POSTHOOK: query: insert into table dtest select c,b from (select array(300,300,300,300,300) as a, 1 as b from src limit 1) y lateral view explode(a) t1 as c +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@dtest +POSTHOOK: Lineage: dtest.a SIMPLE [] +POSTHOOK: Lineage: dtest.b EXPRESSION [] +PREHOOK: query: explain select sum(distinct a), count(distinct a) from dtest +PREHOOK: type: QUERY +POSTHOOK: query: explain select sum(distinct a), count(distinct a) from dtest +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: dtest + Statistics: Num rows: 5 Data size: 40 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: a (type: int) + outputColumnNames: a + Statistics: Num rows: 5 Data size: 40 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: sum(DISTINCT a), count(DISTINCT a) + bucketGroup: true + keys: a (type: int) + mode: hash + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 5 Data size: 40 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: int) + sort order: + + Statistics: Num rows: 5 Data size: 40 Basic stats: COMPLETE Column stats: NONE + Execution mode: vectorized + Reducer 2 + Reduce Operator Tree: + Group By Operator + aggregations: sum(DISTINCT KEY._col0:0._col0), count(DISTINCT KEY._col0:1._col0) + mode: mergepartial + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: 
_col0 (type: bigint), _col1 (type: bigint) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select sum(distinct a), count(distinct a) from dtest +PREHOOK: type: QUERY +PREHOOK: Input: default@dtest +#### A masked pattern was here #### +POSTHOOK: query: select sum(distinct a), count(distinct a) from dtest +POSTHOOK: type: QUERY +POSTHOOK: Input: default@dtest +#### A masked pattern was here #### +300 1 +PREHOOK: query: explain select sum(distinct cint), count(distinct cint), avg(distinct cint), std(distinct cint) from alltypesorc +PREHOOK: type: QUERY +POSTHOOK: query: explain select sum(distinct cint), count(distinct cint), avg(distinct cint), std(distinct cint) from alltypesorc +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: alltypesorc + Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: cint (type: int) + outputColumnNames: cint + Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: sum(DISTINCT cint), count(DISTINCT cint), avg(DISTINCT cint), std(DISTINCT cint) + keys: cint (type: int) + mode: hash + outputColumnNames: _col0, _col1, _col2, _col3, _col4 + Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: int) + sort order: + + Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE + Execution mode: vectorized + Reducer 2 + Reduce Operator Tree: + Group By Operator + aggregations: sum(DISTINCT KEY._col0:0._col0), count(DISTINCT KEY._col0:1._col0), avg(DISTINCT KEY._col0:2._col0), std(DISTINCT KEY._col0:3._col0) + mode: mergepartial + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 1 Data size: 32 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: bigint), _col1 (type: bigint), _col2 (type: double), _col3 (type: double) + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 1 Data size: 32 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 32 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select sum(distinct cint), count(distinct cint), avg(distinct cint), std(distinct cint) from alltypesorc +PREHOOK: type: QUERY +PREHOOK: Input: default@alltypesorc +#### A masked pattern was here #### +POSTHOOK: query: select sum(distinct cint), count(distinct cint), avg(distinct cint), std(distinct 
cint) from alltypesorc +POSTHOOK: type: QUERY +POSTHOOK: Input: default@alltypesorc +#### A masked pattern was here #### +-3482841611 6082 -572647.4204209142 6.153814687328991E8 Index: ql/src/test/results/clientpositive/tez/vectorized_ptf.q.out =================================================================== --- ql/src/test/results/clientpositive/tez/vectorized_ptf.q.out (revision 1637277) +++ ql/src/test/results/clientpositive/tez/vectorized_ptf.q.out (working copy) @@ -2,9 +2,9 @@ PREHOOK: type: DROPTABLE POSTHOOK: query: DROP TABLE part_staging POSTHOOK: type: DROPTABLE -PREHOOK: query: DROP TABLE part +PREHOOK: query: DROP TABLE part_orc PREHOOK: type: DROPTABLE -POSTHOOK: query: DROP TABLE part +POSTHOOK: query: DROP TABLE part_orc POSTHOOK: type: DROPTABLE PREHOOK: query: -- NOTE: This test is a copy of ptf. -- NOTE: We cannot vectorize "pure" table functions (e.g. NOOP) -- given their blackbox nature. So only queries without table functions and @@ -52,7 +52,7 @@ POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@part_staging -PREHOOK: query: CREATE TABLE part( +PREHOOK: query: CREATE TABLE part_orc( p_partkey INT, p_name STRING, p_mfgr STRING, @@ -65,8 +65,8 @@ ) STORED AS ORC PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@part -POSTHOOK: query: CREATE TABLE part( +PREHOOK: Output: default@part_orc +POSTHOOK: query: CREATE TABLE part_orc( p_partkey INT, p_name STRING, p_mfgr STRING, @@ -79,13 +79,13 @@ ) STORED AS ORC POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@part -PREHOOK: query: DESCRIBE EXTENDED part +POSTHOOK: Output: default@part_orc +PREHOOK: query: DESCRIBE EXTENDED part_orc PREHOOK: type: DESCTABLE -PREHOOK: Input: default@part -POSTHOOK: query: DESCRIBE EXTENDED part +PREHOOK: Input: default@part_orc +POSTHOOK: query: DESCRIBE EXTENDED part_orc POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@part +POSTHOOK: Input: default@part_orc p_partkey int p_name string p_mfgr string @@ -97,23 +97,23 @@ p_comment string #### A masked pattern was here #### -PREHOOK: query: insert into table part select * from part_staging +PREHOOK: query: insert into table part_orc select * from part_staging PREHOOK: type: QUERY PREHOOK: Input: default@part_staging -PREHOOK: Output: default@part -POSTHOOK: query: insert into table part select * from part_staging +PREHOOK: Output: default@part_orc +POSTHOOK: query: insert into table part_orc select * from part_staging POSTHOOK: type: QUERY POSTHOOK: Input: default@part_staging -POSTHOOK: Output: default@part -POSTHOOK: Lineage: part.p_brand SIMPLE [(part_staging)part_staging.FieldSchema(name:p_brand, type:string, comment:null), ] -POSTHOOK: Lineage: part.p_comment SIMPLE [(part_staging)part_staging.FieldSchema(name:p_comment, type:string, comment:null), ] -POSTHOOK: Lineage: part.p_container SIMPLE [(part_staging)part_staging.FieldSchema(name:p_container, type:string, comment:null), ] -POSTHOOK: Lineage: part.p_mfgr SIMPLE [(part_staging)part_staging.FieldSchema(name:p_mfgr, type:string, comment:null), ] -POSTHOOK: Lineage: part.p_name SIMPLE [(part_staging)part_staging.FieldSchema(name:p_name, type:string, comment:null), ] -POSTHOOK: Lineage: part.p_partkey SIMPLE [(part_staging)part_staging.FieldSchema(name:p_partkey, type:int, comment:null), ] -POSTHOOK: Lineage: part.p_retailprice SIMPLE [(part_staging)part_staging.FieldSchema(name:p_retailprice, type:double, comment:null), ] -POSTHOOK: Lineage: part.p_size SIMPLE 
[(part_staging)part_staging.FieldSchema(name:p_size, type:int, comment:null), ] -POSTHOOK: Lineage: part.p_type SIMPLE [(part_staging)part_staging.FieldSchema(name:p_type, type:string, comment:null), ] +POSTHOOK: Output: default@part_orc +POSTHOOK: Lineage: part_orc.p_brand SIMPLE [(part_staging)part_staging.FieldSchema(name:p_brand, type:string, comment:null), ] +POSTHOOK: Lineage: part_orc.p_comment SIMPLE [(part_staging)part_staging.FieldSchema(name:p_comment, type:string, comment:null), ] +POSTHOOK: Lineage: part_orc.p_container SIMPLE [(part_staging)part_staging.FieldSchema(name:p_container, type:string, comment:null), ] +POSTHOOK: Lineage: part_orc.p_mfgr SIMPLE [(part_staging)part_staging.FieldSchema(name:p_mfgr, type:string, comment:null), ] +POSTHOOK: Lineage: part_orc.p_name SIMPLE [(part_staging)part_staging.FieldSchema(name:p_name, type:string, comment:null), ] +POSTHOOK: Lineage: part_orc.p_partkey SIMPLE [(part_staging)part_staging.FieldSchema(name:p_partkey, type:int, comment:null), ] +POSTHOOK: Lineage: part_orc.p_retailprice SIMPLE [(part_staging)part_staging.FieldSchema(name:p_retailprice, type:double, comment:null), ] +POSTHOOK: Lineage: part_orc.p_size SIMPLE [(part_staging)part_staging.FieldSchema(name:p_size, type:int, comment:null), ] +POSTHOOK: Lineage: part_orc.p_type SIMPLE [(part_staging)part_staging.FieldSchema(name:p_type, type:string, comment:null), ] PREHOOK: query: --1. test1 explain extended @@ -121,7 +121,7 @@ rank() over (partition by p_mfgr order by p_name) as r, dense_rank() over (partition by p_mfgr order by p_name) as dr, sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row) as s1 -from noop(on part +from noop(on part_orc partition by p_mfgr order by p_name ) @@ -133,7 +133,7 @@ rank() over (partition by p_mfgr order by p_name) as r, dense_rank() over (partition by p_mfgr order by p_name) as dr, sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row) as s1 -from noop(on part +from noop(on part_orc partition by p_mfgr order by p_name ) @@ -146,7 +146,7 @@ noop TOK_TABREF TOK_TABNAME - part + part_orc TOK_PARTITIONINGSPEC TOK_DISTRIBUTEBY TOK_TABLE_OR_COL @@ -231,7 +231,7 @@ Map 1 Map Operator Tree: TableScan - alias: part + alias: part_orc Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE GatherStats: false Reduce Output Operator @@ -247,7 +247,7 @@ Path -> Partition: #### A masked pattern was here #### Partition - base file name: part + base file name: part_orc input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat properties: @@ -257,11 +257,11 @@ columns.comments columns.types int:string:string:string:string:int:string:double:string #### A masked pattern was here #### - name default.part + name default.part_orc numFiles 1 numRows 26 rawDataSize 16042 - serialization.ddl struct part { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} + serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde totalSize 2597 @@ -277,20 +277,20 @@ columns.comments columns.types int:string:string:string:string:int:string:double:string #### A masked pattern 
was here #### - name default.part + name default.part_orc numFiles 1 numRows 26 rawDataSize 16042 - serialization.ddl struct part { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} + serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde totalSize 2597 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.part - name: default.part + name: default.part_orc + name: default.part_orc Truncated Path -> Alias: - /part [part] + /part_orc [part_orc] Reducer 2 Needs Tagging: false Reduce Operator Tree: @@ -353,23 +353,23 @@ rank() over (partition by p_mfgr order by p_name) as r, dense_rank() over (partition by p_mfgr order by p_name) as dr, sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row) as s1 -from noop(on part +from noop(on part_orc partition by p_mfgr order by p_name ) PREHOOK: type: QUERY -PREHOOK: Input: default@part +PREHOOK: Input: default@part_orc #### A masked pattern was here #### POSTHOOK: query: select p_mfgr, p_name, p_size, rank() over (partition by p_mfgr order by p_name) as r, dense_rank() over (partition by p_mfgr order by p_name) as dr, sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row) as s1 -from noop(on part +from noop(on part_orc partition by p_mfgr order by p_name ) POSTHOOK: type: QUERY -POSTHOOK: Input: default@part +POSTHOOK: Input: default@part_orc #### A masked pattern was here #### Manufacturer#1 almond antique burnished rose metallic 2 1 1 1173.15 Manufacturer#1 almond antique burnished rose metallic 2 1 1 2346.3 @@ -402,7 +402,7 @@ explain extended select p_mfgr, p_name, p_size, p_size - lag(p_size,1,p_size) over (partition by p_mfgr order by p_name) as deltaSz -from noop (on (select p1.* from part p1 join part p2 on p1.p_partkey = p2.p_partkey) j +from noop (on (select p1.* from part_orc p1 join part_orc p2 on p1.p_partkey = p2.p_partkey) j distribute by j.p_mfgr sort by j.p_name) PREHOOK: type: QUERY @@ -411,7 +411,7 @@ explain extended select p_mfgr, p_name, p_size, p_size - lag(p_size,1,p_size) over (partition by p_mfgr order by p_name) as deltaSz -from noop (on (select p1.* from part p1 join part p2 on p1.p_partkey = p2.p_partkey) j +from noop (on (select p1.* from part_orc p1 join part_orc p2 on p1.p_partkey = p2.p_partkey) j distribute by j.p_mfgr sort by j.p_name) POSTHOOK: type: QUERY @@ -427,11 +427,11 @@ TOK_JOIN TOK_TABREF TOK_TABNAME - part + part_orc p1 TOK_TABREF TOK_TABNAME - part + part_orc p2 = . 
@@ -537,7 +537,7 @@ Path -> Partition: #### A masked pattern was here #### Partition - base file name: part + base file name: part_orc input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat properties: @@ -547,11 +547,11 @@ columns.comments columns.types int:string:string:string:string:int:string:double:string #### A masked pattern was here #### - name default.part + name default.part_orc numFiles 1 numRows 26 rawDataSize 16042 - serialization.ddl struct part { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} + serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde totalSize 2597 @@ -567,20 +567,20 @@ columns.comments columns.types int:string:string:string:string:int:string:double:string #### A masked pattern was here #### - name default.part + name default.part_orc numFiles 1 numRows 26 rawDataSize 16042 - serialization.ddl struct part { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} + serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde totalSize 2597 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.part - name: default.part + name: default.part_orc + name: default.part_orc Truncated Path -> Alias: - /part [p1] + /part_orc [p1] Execution mode: vectorized Map 5 Map Operator Tree: @@ -604,7 +604,7 @@ Path -> Partition: #### A masked pattern was here #### Partition - base file name: part + base file name: part_orc input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat properties: @@ -614,11 +614,11 @@ columns.comments columns.types int:string:string:string:string:int:string:double:string #### A masked pattern was here #### - name default.part + name default.part_orc numFiles 1 numRows 26 rawDataSize 16042 - serialization.ddl struct part { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} + serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde totalSize 2597 @@ -634,20 +634,20 @@ columns.comments columns.types int:string:string:string:string:int:string:double:string #### A masked pattern was here #### - name default.part + name default.part_orc numFiles 1 numRows 26 rawDataSize 16042 - serialization.ddl struct part { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} + serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 
serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde totalSize 2597 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.part - name: default.part + name: default.part_orc + name: default.part_orc Truncated Path -> Alias: - /part [p2] + /part_orc [p2] Execution mode: vectorized Reducer 2 Needs Tagging: false @@ -733,19 +733,19 @@ PREHOOK: query: select p_mfgr, p_name, p_size, p_size - lag(p_size,1,p_size) over (partition by p_mfgr order by p_name) as deltaSz -from noop (on (select p1.* from part p1 join part p2 on p1.p_partkey = p2.p_partkey) j +from noop (on (select p1.* from part_orc p1 join part_orc p2 on p1.p_partkey = p2.p_partkey) j distribute by j.p_mfgr sort by j.p_name) PREHOOK: type: QUERY -PREHOOK: Input: default@part +PREHOOK: Input: default@part_orc #### A masked pattern was here #### POSTHOOK: query: select p_mfgr, p_name, p_size, p_size - lag(p_size,1,p_size) over (partition by p_mfgr order by p_name) as deltaSz -from noop (on (select p1.* from part p1 join part p2 on p1.p_partkey = p2.p_partkey) j +from noop (on (select p1.* from part_orc p1 join part_orc p2 on p1.p_partkey = p2.p_partkey) j distribute by j.p_mfgr sort by j.p_name) POSTHOOK: type: QUERY -POSTHOOK: Input: default@part +POSTHOOK: Input: default@part_orc #### A masked pattern was here #### Manufacturer#1 almond antique burnished rose metallic 2 0 Manufacturer#1 almond antique burnished rose metallic 2 0 @@ -779,7 +779,7 @@ explain extended select p_mfgr, p_name, p_size -from noop(on part +from noop(on part_orc partition by p_mfgr order by p_name) PREHOOK: type: QUERY @@ -787,7 +787,7 @@ explain extended select p_mfgr, p_name, p_size -from noop(on part +from noop(on part_orc partition by p_mfgr order by p_name) POSTHOOK: type: QUERY @@ -799,7 +799,7 @@ noop TOK_TABREF TOK_TABNAME - part + part_orc TOK_PARTITIONINGSPEC TOK_DISTRIBUTEBY TOK_TABLE_OR_COL @@ -838,7 +838,7 @@ Map 1 Map Operator Tree: TableScan - alias: part + alias: part_orc Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE GatherStats: false Reduce Output Operator @@ -854,7 +854,7 @@ Path -> Partition: #### A masked pattern was here #### Partition - base file name: part + base file name: part_orc input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat properties: @@ -864,11 +864,11 @@ columns.comments columns.types int:string:string:string:string:int:string:double:string #### A masked pattern was here #### - name default.part + name default.part_orc numFiles 1 numRows 26 rawDataSize 16042 - serialization.ddl struct part { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} + serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde totalSize 2597 @@ -884,20 +884,20 @@ columns.comments columns.types int:string:string:string:string:int:string:double:string #### A masked pattern was here #### - name default.part + name default.part_orc numFiles 1 numRows 26 rawDataSize 16042 - serialization.ddl struct part { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} + serialization.ddl struct part_orc { i32 
p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde totalSize 2597 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.part - name: default.part + name: default.part_orc + name: default.part_orc Truncated Path -> Alias: - /part [part] + /part_orc [part_orc] Reducer 2 Needs Tagging: false Reduce Operator Tree: @@ -938,18 +938,18 @@ ListSink PREHOOK: query: select p_mfgr, p_name, p_size -from noop(on part +from noop(on part_orc partition by p_mfgr order by p_name) PREHOOK: type: QUERY -PREHOOK: Input: default@part +PREHOOK: Input: default@part_orc #### A masked pattern was here #### POSTHOOK: query: select p_mfgr, p_name, p_size -from noop(on part +from noop(on part_orc partition by p_mfgr order by p_name) POSTHOOK: type: QUERY -POSTHOOK: Input: default@part +POSTHOOK: Input: default@part_orc #### A masked pattern was here #### Manufacturer#1 almond antique burnished rose metallic 2 Manufacturer#1 almond antique burnished rose metallic 2 @@ -984,7 +984,7 @@ rank() over (partition by p_mfgr order by p_name) as r, dense_rank() over (partition by p_mfgr order by p_name) as dr, sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row) as s1 -from noop(on part +from noop(on part_orc partition by p_mfgr order by p_name ) abc @@ -996,7 +996,7 @@ rank() over (partition by p_mfgr order by p_name) as r, dense_rank() over (partition by p_mfgr order by p_name) as dr, sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row) as s1 -from noop(on part +from noop(on part_orc partition by p_mfgr order by p_name ) abc @@ -1010,7 +1010,7 @@ abc TOK_TABREF TOK_TABNAME - part + part_orc TOK_PARTITIONINGSPEC TOK_DISTRIBUTEBY TOK_TABLE_OR_COL @@ -1095,7 +1095,7 @@ Map 1 Map Operator Tree: TableScan - alias: part + alias: part_orc Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE GatherStats: false Reduce Output Operator @@ -1111,7 +1111,7 @@ Path -> Partition: #### A masked pattern was here #### Partition - base file name: part + base file name: part_orc input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat properties: @@ -1121,11 +1121,11 @@ columns.comments columns.types int:string:string:string:string:int:string:double:string #### A masked pattern was here #### - name default.part + name default.part_orc numFiles 1 numRows 26 rawDataSize 16042 - serialization.ddl struct part { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} + serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde totalSize 2597 @@ -1141,20 +1141,20 @@ columns.comments columns.types int:string:string:string:string:int:string:double:string #### A masked pattern was here #### - name default.part + name default.part_orc numFiles 1 numRows 26 rawDataSize 16042 - serialization.ddl struct part { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double 
p_retailprice, string p_comment} + serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde totalSize 2597 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.part - name: default.part + name: default.part_orc + name: default.part_orc Truncated Path -> Alias: - /part [part] + /part_orc [part_orc] Reducer 2 Needs Tagging: false Reduce Operator Tree: @@ -1217,23 +1217,23 @@ rank() over (partition by p_mfgr order by p_name) as r, dense_rank() over (partition by p_mfgr order by p_name) as dr, sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row) as s1 -from noop(on part +from noop(on part_orc partition by p_mfgr order by p_name ) abc PREHOOK: type: QUERY -PREHOOK: Input: default@part +PREHOOK: Input: default@part_orc #### A masked pattern was here #### POSTHOOK: query: select p_mfgr, p_name, p_size, rank() over (partition by p_mfgr order by p_name) as r, dense_rank() over (partition by p_mfgr order by p_name) as dr, sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row) as s1 -from noop(on part +from noop(on part_orc partition by p_mfgr order by p_name ) abc POSTHOOK: type: QUERY -POSTHOOK: Input: default@part +POSTHOOK: Input: default@part_orc #### A masked pattern was here #### Manufacturer#1 almond antique burnished rose metallic 2 1 1 1173.15 Manufacturer#1 almond antique burnished rose metallic 2 1 1 2346.3 @@ -1268,7 +1268,7 @@ rank() over (partition by p_mfgr order by p_name) as r, dense_rank() over (partition by p_mfgr order by p_name) as dr, p_size, p_size - lag(p_size,1,p_size) over (partition by p_mfgr order by p_name) as deltaSz -from noop(on part +from noop(on part_orc partition by p_mfgr order by p_name ) @@ -1280,7 +1280,7 @@ rank() over (partition by p_mfgr order by p_name) as r, dense_rank() over (partition by p_mfgr order by p_name) as dr, p_size, p_size - lag(p_size,1,p_size) over (partition by p_mfgr order by p_name) as deltaSz -from noop(on part +from noop(on part_orc partition by p_mfgr order by p_name ) @@ -1293,7 +1293,7 @@ noop TOK_TABREF TOK_TABNAME - part + part_orc TOK_PARTITIONINGSPEC TOK_DISTRIBUTEBY TOK_TABLE_OR_COL @@ -1383,7 +1383,7 @@ Map 1 Map Operator Tree: TableScan - alias: part + alias: part_orc Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE GatherStats: false Reduce Output Operator @@ -1399,7 +1399,7 @@ Path -> Partition: #### A masked pattern was here #### Partition - base file name: part + base file name: part_orc input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat properties: @@ -1409,11 +1409,11 @@ columns.comments columns.types int:string:string:string:string:int:string:double:string #### A masked pattern was here #### - name default.part + name default.part_orc numFiles 1 numRows 26 rawDataSize 16042 - serialization.ddl struct part { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} + serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} 
serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde totalSize 2597 @@ -1429,20 +1429,20 @@ columns.comments columns.types int:string:string:string:string:int:string:double:string #### A masked pattern was here #### - name default.part + name default.part_orc numFiles 1 numRows 26 rawDataSize 16042 - serialization.ddl struct part { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} + serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde totalSize 2597 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.part - name: default.part + name: default.part_orc + name: default.part_orc Truncated Path -> Alias: - /part [part] + /part_orc [part_orc] Reducer 2 Needs Tagging: false Reduce Operator Tree: @@ -1505,23 +1505,23 @@ rank() over (partition by p_mfgr order by p_name) as r, dense_rank() over (partition by p_mfgr order by p_name) as dr, p_size, p_size - lag(p_size,1,p_size) over (partition by p_mfgr order by p_name) as deltaSz -from noop(on part +from noop(on part_orc partition by p_mfgr order by p_name ) PREHOOK: type: QUERY -PREHOOK: Input: default@part +PREHOOK: Input: default@part_orc #### A masked pattern was here #### POSTHOOK: query: select p_mfgr, p_name, p_size, rank() over (partition by p_mfgr order by p_name) as r, dense_rank() over (partition by p_mfgr order by p_name) as dr, p_size, p_size - lag(p_size,1,p_size) over (partition by p_mfgr order by p_name) as deltaSz -from noop(on part +from noop(on part_orc partition by p_mfgr order by p_name ) POSTHOOK: type: QUERY -POSTHOOK: Input: default@part +POSTHOOK: Input: default@part_orc #### A masked pattern was here #### Manufacturer#1 almond antique burnished rose metallic 2 1 1 2 0 Manufacturer#1 almond antique burnished rose metallic 2 1 1 2 0 @@ -1556,7 +1556,7 @@ rank() over (partition by p_mfgr order by p_name) as r, dense_rank() over (partition by p_mfgr order by p_name) as dr, p_size, p_size - lag(p_size,1,p_size) over (partition by p_mfgr order by p_name) as deltaSz -from noop(on part +from noop(on part_orc partition by p_mfgr order by p_name ) @@ -1569,7 +1569,7 @@ rank() over (partition by p_mfgr order by p_name) as r, dense_rank() over (partition by p_mfgr order by p_name) as dr, p_size, p_size - lag(p_size,1,p_size) over (partition by p_mfgr order by p_name) as deltaSz -from noop(on part +from noop(on part_orc partition by p_mfgr order by p_name ) @@ -1583,7 +1583,7 @@ noop TOK_TABREF TOK_TABNAME - part + part_orc TOK_PARTITIONINGSPEC TOK_DISTRIBUTEBY TOK_TABLE_OR_COL @@ -1681,7 +1681,7 @@ Map 1 Map Operator Tree: TableScan - alias: part + alias: part_orc Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE GatherStats: false Reduce Output Operator @@ -1697,7 +1697,7 @@ Path -> Partition: #### A masked pattern was here #### Partition - base file name: part + base file name: part_orc input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat properties: @@ -1707,11 +1707,11 @@ columns.comments columns.types int:string:string:string:string:int:string:double:string #### A masked pattern was here #### - name default.part + name default.part_orc numFiles 
1 numRows 26 rawDataSize 16042 - serialization.ddl struct part { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} + serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde totalSize 2597 @@ -1727,20 +1727,20 @@ columns.comments columns.types int:string:string:string:string:int:string:double:string #### A masked pattern was here #### - name default.part + name default.part_orc numFiles 1 numRows 26 rawDataSize 16042 - serialization.ddl struct part { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} + serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde totalSize 2597 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.part - name: default.part + name: default.part_orc + name: default.part_orc Truncated Path -> Alias: - /part [part] + /part_orc [part_orc] Reducer 2 Needs Tagging: false Reduce Operator Tree: @@ -1824,25 +1824,25 @@ rank() over (partition by p_mfgr order by p_name) as r, dense_rank() over (partition by p_mfgr order by p_name) as dr, p_size, p_size - lag(p_size,1,p_size) over (partition by p_mfgr order by p_name) as deltaSz -from noop(on part +from noop(on part_orc partition by p_mfgr order by p_name ) group by p_mfgr, p_name, p_size PREHOOK: type: QUERY -PREHOOK: Input: default@part +PREHOOK: Input: default@part_orc #### A masked pattern was here #### POSTHOOK: query: select p_mfgr, p_name, p_size, rank() over (partition by p_mfgr order by p_name) as r, dense_rank() over (partition by p_mfgr order by p_name) as dr, p_size, p_size - lag(p_size,1,p_size) over (partition by p_mfgr order by p_name) as deltaSz -from noop(on part +from noop(on part_orc partition by p_mfgr order by p_name ) group by p_mfgr, p_name, p_size POSTHOOK: type: QUERY -POSTHOOK: Input: default@part +POSTHOOK: Input: default@part_orc #### A masked pattern was here #### Manufacturer#1 almond antique burnished rose metallic 2 1 1 2 0 Manufacturer#1 almond antique chartreuse lavender yellow 34 2 2 34 32 @@ -1873,19 +1873,19 @@ explain extended select abc.* -from noop(on part +from noop(on part_orc partition by p_mfgr order by p_name -) abc join part p1 on abc.p_partkey = p1.p_partkey +) abc join part_orc p1 on abc.p_partkey = p1.p_partkey PREHOOK: type: QUERY POSTHOOK: query: -- 7. testJoin explain extended select abc.* -from noop(on part +from noop(on part_orc partition by p_mfgr order by p_name -) abc join part p1 on abc.p_partkey = p1.p_partkey +) abc join part_orc p1 on abc.p_partkey = p1.p_partkey POSTHOOK: type: QUERY ABSTRACT SYNTAX TREE: @@ -1897,7 +1897,7 @@ abc TOK_TABREF TOK_TABNAME - part + part_orc TOK_PARTITIONINGSPEC TOK_DISTRIBUTEBY TOK_TABLE_OR_COL @@ -1908,7 +1908,7 @@ p_name TOK_TABREF TOK_TABNAME - part + part_orc p1 = . 
@@ -1964,7 +1964,7 @@ Path -> Partition: #### A masked pattern was here #### Partition - base file name: part + base file name: part_orc input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat properties: @@ -1974,11 +1974,11 @@ columns.comments columns.types int:string:string:string:string:int:string:double:string #### A masked pattern was here #### - name default.part + name default.part_orc numFiles 1 numRows 26 rawDataSize 16042 - serialization.ddl struct part { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} + serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde totalSize 2597 @@ -1994,25 +1994,25 @@ columns.comments columns.types int:string:string:string:string:int:string:double:string #### A masked pattern was here #### - name default.part + name default.part_orc numFiles 1 numRows 26 rawDataSize 16042 - serialization.ddl struct part { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} + serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde totalSize 2597 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.part - name: default.part + name: default.part_orc + name: default.part_orc Truncated Path -> Alias: - /part [p1] + /part_orc [p1] Execution mode: vectorized Map 3 Map Operator Tree: TableScan - alias: part + alias: part_orc Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE GatherStats: false Reduce Output Operator @@ -2028,7 +2028,7 @@ Path -> Partition: #### A masked pattern was here #### Partition - base file name: part + base file name: part_orc input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat properties: @@ -2038,11 +2038,11 @@ columns.comments columns.types int:string:string:string:string:int:string:double:string #### A masked pattern was here #### - name default.part + name default.part_orc numFiles 1 numRows 26 rawDataSize 16042 - serialization.ddl struct part { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} + serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde totalSize 2597 @@ -2058,20 +2058,20 @@ columns.comments columns.types int:string:string:string:string:int:string:double:string #### A masked pattern was here #### - name default.part + name default.part_orc numFiles 1 numRows 26 rawDataSize 16042 - serialization.ddl struct part { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} + serialization.ddl struct part_orc { i32 
p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde totalSize 2597 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.part - name: default.part + name: default.part_orc + name: default.part_orc Truncated Path -> Alias: - /part [part] + /part_orc [part_orc] Reducer 2 Needs Tagging: false Reduce Operator Tree: @@ -2140,20 +2140,20 @@ ListSink PREHOOK: query: select abc.* -from noop(on part +from noop(on part_orc partition by p_mfgr order by p_name -) abc join part p1 on abc.p_partkey = p1.p_partkey +) abc join part_orc p1 on abc.p_partkey = p1.p_partkey PREHOOK: type: QUERY -PREHOOK: Input: default@part +PREHOOK: Input: default@part_orc #### A masked pattern was here #### POSTHOOK: query: select abc.* -from noop(on part +from noop(on part_orc partition by p_mfgr order by p_name -) abc join part p1 on abc.p_partkey = p1.p_partkey +) abc join part_orc p1 on abc.p_partkey = p1.p_partkey POSTHOOK: type: QUERY -POSTHOOK: Input: default@part +POSTHOOK: Input: default@part_orc #### A masked pattern was here #### 15103 almond aquamarine dodger light gainsboro Manufacturer#5 Brand#53 ECONOMY BURNISHED STEEL 46 LG PACK 1018.1 packages hinder carefu 17273 almond antique forest lavender goldenrod Manufacturer#3 Brand#35 PROMO ANODIZED TIN 14 JUMBO CASE 1190.27 along the @@ -2187,7 +2187,7 @@ explain extended select abc.* -from part p1 join noop(on part +from part_orc p1 join noop(on part_orc partition by p_mfgr order by p_name ) abc on abc.p_partkey = p1.p_partkey @@ -2196,7 +2196,7 @@ explain extended select abc.* -from part p1 join noop(on part +from part_orc p1 join noop(on part_orc partition by p_mfgr order by p_name ) abc on abc.p_partkey = p1.p_partkey @@ -2208,14 +2208,14 @@ TOK_JOIN TOK_TABREF TOK_TABNAME - part + part_orc p1 TOK_PTBLFUNCTION noop abc TOK_TABREF TOK_TABNAME - part + part_orc TOK_PARTITIONINGSPEC TOK_DISTRIBUTEBY TOK_TABLE_OR_COL @@ -2278,7 +2278,7 @@ Path -> Partition: #### A masked pattern was here #### Partition - base file name: part + base file name: part_orc input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat properties: @@ -2288,11 +2288,11 @@ columns.comments columns.types int:string:string:string:string:int:string:double:string #### A masked pattern was here #### - name default.part + name default.part_orc numFiles 1 numRows 26 rawDataSize 16042 - serialization.ddl struct part { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} + serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde totalSize 2597 @@ -2308,25 +2308,25 @@ columns.comments columns.types int:string:string:string:string:int:string:double:string #### A masked pattern was here #### - name default.part + name default.part_orc numFiles 1 numRows 26 rawDataSize 16042 - serialization.ddl struct part { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} + serialization.ddl struct part_orc { i32 p_partkey, string p_name, 
string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde totalSize 2597 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.part - name: default.part + name: default.part_orc + name: default.part_orc Truncated Path -> Alias: - /part [p1] + /part_orc [p1] Execution mode: vectorized Map 3 Map Operator Tree: TableScan - alias: part + alias: part_orc Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE GatherStats: false Reduce Output Operator @@ -2342,7 +2342,7 @@ Path -> Partition: #### A masked pattern was here #### Partition - base file name: part + base file name: part_orc input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat properties: @@ -2352,11 +2352,11 @@ columns.comments columns.types int:string:string:string:string:int:string:double:string #### A masked pattern was here #### - name default.part + name default.part_orc numFiles 1 numRows 26 rawDataSize 16042 - serialization.ddl struct part { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} + serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde totalSize 2597 @@ -2372,20 +2372,20 @@ columns.comments columns.types int:string:string:string:string:int:string:double:string #### A masked pattern was here #### - name default.part + name default.part_orc numFiles 1 numRows 26 rawDataSize 16042 - serialization.ddl struct part { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} + serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde totalSize 2597 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.part - name: default.part + name: default.part_orc + name: default.part_orc Truncated Path -> Alias: - /part [part] + /part_orc [part_orc] Reducer 2 Needs Tagging: false Reduce Operator Tree: @@ -2454,20 +2454,20 @@ ListSink PREHOOK: query: select abc.* -from part p1 join noop(on part +from part_orc p1 join noop(on part_orc partition by p_mfgr order by p_name ) abc on abc.p_partkey = p1.p_partkey PREHOOK: type: QUERY -PREHOOK: Input: default@part +PREHOOK: Input: default@part_orc #### A masked pattern was here #### POSTHOOK: query: select abc.* -from part p1 join noop(on part +from part_orc p1 join noop(on part_orc partition by p_mfgr order by p_name ) abc on abc.p_partkey = p1.p_partkey POSTHOOK: type: QUERY -POSTHOOK: Input: default@part +POSTHOOK: Input: default@part_orc #### A masked pattern was here #### 15103 almond aquamarine dodger light gainsboro Manufacturer#5 Brand#53 ECONOMY BURNISHED STEEL 46 LG PACK 1018.1 packages hinder carefu 17273 almond antique forest lavender goldenrod Manufacturer#3 Brand#35 PROMO ANODIZED TIN 14 JUMBO CASE 1190.27 along the @@ -2502,7 +2502,7 @@ explain 
extended select p_mfgr, p_name, p_size, rank() over (partition by p_mfgr order by p_name, p_size desc) as r -from noopwithmap(on part +from noopwithmap(on part_orc partition by p_mfgr order by p_name, p_size desc) PREHOOK: type: QUERY @@ -2511,7 +2511,7 @@ explain extended select p_mfgr, p_name, p_size, rank() over (partition by p_mfgr order by p_name, p_size desc) as r -from noopwithmap(on part +from noopwithmap(on part_orc partition by p_mfgr order by p_name, p_size desc) POSTHOOK: type: QUERY @@ -2523,7 +2523,7 @@ noopwithmap TOK_TABREF TOK_TABNAME - part + part_orc TOK_PARTITIONINGSPEC TOK_DISTRIBUTEBY TOK_TABLE_OR_COL @@ -2582,7 +2582,7 @@ Map 1 Map Operator Tree: TableScan - alias: part + alias: part_orc Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE GatherStats: false PTF Operator @@ -2600,7 +2600,7 @@ Path -> Partition: #### A masked pattern was here #### Partition - base file name: part + base file name: part_orc input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat properties: @@ -2610,11 +2610,11 @@ columns.comments columns.types int:string:string:string:string:int:string:double:string #### A masked pattern was here #### - name default.part + name default.part_orc numFiles 1 numRows 26 rawDataSize 16042 - serialization.ddl struct part { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} + serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde totalSize 2597 @@ -2630,20 +2630,20 @@ columns.comments columns.types int:string:string:string:string:int:string:double:string #### A masked pattern was here #### - name default.part + name default.part_orc numFiles 1 numRows 26 rawDataSize 16042 - serialization.ddl struct part { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} + serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde totalSize 2597 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.part - name: default.part + name: default.part_orc + name: default.part_orc Truncated Path -> Alias: - /part [part] + /part_orc [part_orc] Reducer 2 Needs Tagging: false Reduce Operator Tree: @@ -2704,19 +2704,19 @@ PREHOOK: query: select p_mfgr, p_name, p_size, rank() over (partition by p_mfgr order by p_name, p_size desc) as r -from noopwithmap(on part +from noopwithmap(on part_orc partition by p_mfgr order by p_name, p_size desc) PREHOOK: type: QUERY -PREHOOK: Input: default@part +PREHOOK: Input: default@part_orc #### A masked pattern was here #### POSTHOOK: query: select p_mfgr, p_name, p_size, rank() over (partition by p_mfgr order by p_name, p_size desc) as r -from noopwithmap(on part +from noopwithmap(on part_orc partition by p_mfgr order by p_name, p_size desc) POSTHOOK: type: QUERY -POSTHOOK: Input: default@part +POSTHOOK: Input: default@part_orc #### A masked pattern was here #### Manufacturer#1 almond antique burnished 
rose metallic 2 1 Manufacturer#1 almond antique burnished rose metallic 2 1 @@ -2751,7 +2751,7 @@ rank() over (partition by p_mfgr order by p_name) as r, dense_rank() over (partition by p_mfgr order by p_name) as dr, sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row) as s1 -from noopwithmap(on part +from noopwithmap(on part_orc partition by p_mfgr order by p_name) PREHOOK: type: QUERY @@ -2762,7 +2762,7 @@ rank() over (partition by p_mfgr order by p_name) as r, dense_rank() over (partition by p_mfgr order by p_name) as dr, sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row) as s1 -from noopwithmap(on part +from noopwithmap(on part_orc partition by p_mfgr order by p_name) POSTHOOK: type: QUERY @@ -2774,7 +2774,7 @@ noopwithmap TOK_TABREF TOK_TABNAME - part + part_orc TOK_PARTITIONINGSPEC TOK_DISTRIBUTEBY TOK_TABLE_OR_COL @@ -2859,7 +2859,7 @@ Map 1 Map Operator Tree: TableScan - alias: part + alias: part_orc Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE GatherStats: false PTF Operator @@ -2877,7 +2877,7 @@ Path -> Partition: #### A masked pattern was here #### Partition - base file name: part + base file name: part_orc input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat properties: @@ -2887,11 +2887,11 @@ columns.comments columns.types int:string:string:string:string:int:string:double:string #### A masked pattern was here #### - name default.part + name default.part_orc numFiles 1 numRows 26 rawDataSize 16042 - serialization.ddl struct part { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} + serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde totalSize 2597 @@ -2907,20 +2907,20 @@ columns.comments columns.types int:string:string:string:string:int:string:double:string #### A masked pattern was here #### - name default.part + name default.part_orc numFiles 1 numRows 26 rawDataSize 16042 - serialization.ddl struct part { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} + serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde totalSize 2597 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.part - name: default.part + name: default.part_orc + name: default.part_orc Truncated Path -> Alias: - /part [part] + /part_orc [part_orc] Reducer 2 Needs Tagging: false Reduce Operator Tree: @@ -2983,21 +2983,21 @@ rank() over (partition by p_mfgr order by p_name) as r, dense_rank() over (partition by p_mfgr order by p_name) as dr, sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row) as s1 -from noopwithmap(on part +from noopwithmap(on part_orc partition by p_mfgr order by p_name) PREHOOK: type: QUERY -PREHOOK: Input: default@part +PREHOOK: Input: 
default@part_orc #### A masked pattern was here #### POSTHOOK: query: select p_mfgr, p_name, p_size, rank() over (partition by p_mfgr order by p_name) as r, dense_rank() over (partition by p_mfgr order by p_name) as dr, sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row) as s1 -from noopwithmap(on part +from noopwithmap(on part_orc partition by p_mfgr order by p_name) POSTHOOK: type: QUERY -POSTHOOK: Input: default@part +POSTHOOK: Input: default@part_orc #### A masked pattern was here #### Manufacturer#1 almond antique burnished rose metallic 2 1 1 1173.15 Manufacturer#1 almond antique burnished rose metallic 2 1 1 2346.3 @@ -3032,7 +3032,7 @@ rank() over (partition by p_mfgr order by p_name) as r, dense_rank() over (partition by p_mfgr order by p_name) as dr, sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row) as s1 -from noop(on part +from noop(on part_orc partition by p_mfgr order by p_name) PREHOOK: type: QUERY @@ -3043,7 +3043,7 @@ rank() over (partition by p_mfgr order by p_name) as r, dense_rank() over (partition by p_mfgr order by p_name) as dr, sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row) as s1 -from noop(on part +from noop(on part_orc partition by p_mfgr order by p_name) POSTHOOK: type: QUERY @@ -3055,7 +3055,7 @@ noop TOK_TABREF TOK_TABNAME - part + part_orc TOK_PARTITIONINGSPEC TOK_DISTRIBUTEBY TOK_TABLE_OR_COL @@ -3140,7 +3140,7 @@ Map 1 Map Operator Tree: TableScan - alias: part + alias: part_orc Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE GatherStats: false Reduce Output Operator @@ -3156,7 +3156,7 @@ Path -> Partition: #### A masked pattern was here #### Partition - base file name: part + base file name: part_orc input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat properties: @@ -3166,11 +3166,11 @@ columns.comments columns.types int:string:string:string:string:int:string:double:string #### A masked pattern was here #### - name default.part + name default.part_orc numFiles 1 numRows 26 rawDataSize 16042 - serialization.ddl struct part { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} + serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde totalSize 2597 @@ -3186,20 +3186,20 @@ columns.comments columns.types int:string:string:string:string:int:string:double:string #### A masked pattern was here #### - name default.part + name default.part_orc numFiles 1 numRows 26 rawDataSize 16042 - serialization.ddl struct part { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} + serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde totalSize 2597 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.part - name: default.part + name: 
default.part_orc + name: default.part_orc Truncated Path -> Alias: - /part [part] + /part_orc [part_orc] Reducer 2 Needs Tagging: false Reduce Operator Tree: @@ -3262,21 +3262,21 @@ rank() over (partition by p_mfgr order by p_name) as r, dense_rank() over (partition by p_mfgr order by p_name) as dr, sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row) as s1 -from noop(on part +from noop(on part_orc partition by p_mfgr order by p_name) PREHOOK: type: QUERY -PREHOOK: Input: default@part +PREHOOK: Input: default@part_orc #### A masked pattern was here #### POSTHOOK: query: select p_mfgr, p_name, p_size, rank() over (partition by p_mfgr order by p_name) as r, dense_rank() over (partition by p_mfgr order by p_name) as dr, sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row) as s1 -from noop(on part +from noop(on part_orc partition by p_mfgr order by p_name) POSTHOOK: type: QUERY -POSTHOOK: Input: default@part +POSTHOOK: Input: default@part_orc #### A masked pattern was here #### Manufacturer#1 almond antique burnished rose metallic 2 1 1 1173.15 Manufacturer#1 almond antique burnished rose metallic 2 1 1 2346.3 @@ -3311,7 +3311,7 @@ rank() over (partition by p_mfgr order by p_name) as r, dense_rank() over (partition by p_mfgr order by p_name) as dr, sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row) as s1 -from noop(on noopwithmap(on noop(on part +from noop(on noopwithmap(on noop(on part_orc partition by p_mfgr order by p_mfgr, p_name ))) @@ -3323,7 +3323,7 @@ rank() over (partition by p_mfgr order by p_name) as r, dense_rank() over (partition by p_mfgr order by p_name) as dr, sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row) as s1 -from noop(on noopwithmap(on noop(on part +from noop(on noopwithmap(on noop(on part_orc partition by p_mfgr order by p_mfgr, p_name ))) @@ -3340,7 +3340,7 @@ noop TOK_TABREF TOK_TABNAME - part + part_orc TOK_PARTITIONINGSPEC TOK_DISTRIBUTEBY TOK_TABLE_OR_COL @@ -3429,7 +3429,7 @@ Map 1 Map Operator Tree: TableScan - alias: part + alias: part_orc Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE GatherStats: false Reduce Output Operator @@ -3445,7 +3445,7 @@ Path -> Partition: #### A masked pattern was here #### Partition - base file name: part + base file name: part_orc input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat properties: @@ -3455,11 +3455,11 @@ columns.comments columns.types int:string:string:string:string:int:string:double:string #### A masked pattern was here #### - name default.part + name default.part_orc numFiles 1 numRows 26 rawDataSize 16042 - serialization.ddl struct part { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} + serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde totalSize 2597 @@ -3475,20 +3475,20 @@ columns.comments columns.types int:string:string:string:string:int:string:double:string #### A masked pattern was here #### - name default.part + name default.part_orc numFiles 1 numRows 26 
rawDataSize 16042 - serialization.ddl struct part { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} + serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde totalSize 2597 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.part - name: default.part + name: default.part_orc + name: default.part_orc Truncated Path -> Alias: - /part [part] + /part_orc [part_orc] Reducer 2 Needs Tagging: false Reduce Operator Tree: @@ -3568,23 +3568,23 @@ rank() over (partition by p_mfgr order by p_name) as r, dense_rank() over (partition by p_mfgr order by p_name) as dr, sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row) as s1 -from noop(on noopwithmap(on noop(on part +from noop(on noopwithmap(on noop(on part_orc partition by p_mfgr order by p_mfgr, p_name ))) PREHOOK: type: QUERY -PREHOOK: Input: default@part +PREHOOK: Input: default@part_orc #### A masked pattern was here #### POSTHOOK: query: select p_mfgr, p_name, p_size, rank() over (partition by p_mfgr order by p_name) as r, dense_rank() over (partition by p_mfgr order by p_name) as dr, sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row) as s1 -from noop(on noopwithmap(on noop(on part +from noop(on noopwithmap(on noop(on part_orc partition by p_mfgr order by p_mfgr, p_name ))) POSTHOOK: type: QUERY -POSTHOOK: Input: default@part +POSTHOOK: Input: default@part_orc #### A masked pattern was here #### Manufacturer#1 almond antique burnished rose metallic 2 1 1 1173.15 Manufacturer#1 almond antique burnished rose metallic 2 1 1 2346.3 @@ -3621,7 +3621,7 @@ count(p_size) over (partition by p_mfgr order by p_name) as cd, p_retailprice, sum(p_retailprice) over w1 as s1 -from noop(on part +from noop(on part_orc partition by p_mfgr order by p_name) window w1 as (partition by p_mfgr order by p_name rows between 2 preceding and 2 following) @@ -3636,7 +3636,7 @@ count(p_size) over (partition by p_mfgr order by p_name) as cd, p_retailprice, sum(p_retailprice) over w1 as s1 -from noop(on part +from noop(on part_orc partition by p_mfgr order by p_name) window w1 as (partition by p_mfgr order by p_name rows between 2 preceding and 2 following) @@ -3653,7 +3653,7 @@ noop TOK_TABREF TOK_TABNAME - part + part_orc TOK_PARTITIONINGSPEC TOK_DISTRIBUTEBY TOK_TABLE_OR_COL @@ -3755,7 +3755,7 @@ Map 1 Map Operator Tree: TableScan - alias: part + alias: part_orc Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE GatherStats: false Reduce Output Operator @@ -3771,7 +3771,7 @@ Path -> Partition: #### A masked pattern was here #### Partition - base file name: part + base file name: part_orc input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat properties: @@ -3781,11 +3781,11 @@ columns.comments columns.types int:string:string:string:string:int:string:double:string #### A masked pattern was here #### - name default.part + name default.part_orc numFiles 1 numRows 26 rawDataSize 16042 - serialization.ddl struct part { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, 
string p_container, double p_retailprice, string p_comment} + serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde totalSize 2597 @@ -3801,20 +3801,20 @@ columns.comments columns.types int:string:string:string:string:int:string:double:string #### A masked pattern was here #### - name default.part + name default.part_orc numFiles 1 numRows 26 rawDataSize 16042 - serialization.ddl struct part { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} + serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde totalSize 2597 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.part - name: default.part + name: default.part_orc + name: default.part_orc Truncated Path -> Alias: - /part [part] + /part_orc [part_orc] Reducer 2 Needs Tagging: false Reduce Operator Tree: @@ -3879,13 +3879,13 @@ count(p_size) over (partition by p_mfgr order by p_name) as cd, p_retailprice, sum(p_retailprice) over w1 as s1 -from noop(on part +from noop(on part_orc partition by p_mfgr order by p_name) window w1 as (partition by p_mfgr order by p_name rows between 2 preceding and 2 following) ) sub1 PREHOOK: type: QUERY -PREHOOK: Input: default@part +PREHOOK: Input: default@part_orc #### A masked pattern was here #### POSTHOOK: query: select p_mfgr, p_name, sub1.cd, sub1.s1 @@ -3893,13 +3893,13 @@ count(p_size) over (partition by p_mfgr order by p_name) as cd, p_retailprice, sum(p_retailprice) over w1 as s1 -from noop(on part +from noop(on part_orc partition by p_mfgr order by p_name) window w1 as (partition by p_mfgr order by p_name rows between 2 preceding and 2 following) ) sub1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@part +POSTHOOK: Input: default@part_orc #### A masked pattern was here #### Manufacturer#1 almond antique burnished rose metallic 2 4100.06 Manufacturer#1 almond antique burnished rose metallic 2 5702.650000000001 @@ -3936,10 +3936,10 @@ count(abc.p_name) over (distribute by abc.p_mfgr sort by abc.p_name) as cd, abc.p_retailprice, sum(abc.p_retailprice) over (distribute by abc.p_mfgr sort by abc.p_name rows between unbounded preceding and current row) as s1, abc.p_size, abc.p_size - lag(abc.p_size,1,abc.p_size) over (distribute by abc.p_mfgr sort by abc.p_name) as deltaSz -from noop(on part +from noop(on part_orc partition by p_mfgr order by p_name -) abc join part p1 on abc.p_partkey = p1.p_partkey +) abc join part_orc p1 on abc.p_partkey = p1.p_partkey PREHOOK: type: QUERY POSTHOOK: query: -- 14. 
testPTFJoinWithWindowingWithCount @@ -3950,10 +3950,10 @@ count(abc.p_name) over (distribute by abc.p_mfgr sort by abc.p_name) as cd, abc.p_retailprice, sum(abc.p_retailprice) over (distribute by abc.p_mfgr sort by abc.p_name rows between unbounded preceding and current row) as s1, abc.p_size, abc.p_size - lag(abc.p_size,1,abc.p_size) over (distribute by abc.p_mfgr sort by abc.p_name) as deltaSz -from noop(on part +from noop(on part_orc partition by p_mfgr order by p_name -) abc join part p1 on abc.p_partkey = p1.p_partkey +) abc join part_orc p1 on abc.p_partkey = p1.p_partkey POSTHOOK: type: QUERY ABSTRACT SYNTAX TREE: @@ -3965,7 +3965,7 @@ abc TOK_TABREF TOK_TABNAME - part + part_orc TOK_PARTITIONINGSPEC TOK_DISTRIBUTEBY TOK_TABLE_OR_COL @@ -3976,7 +3976,7 @@ p_name TOK_TABREF TOK_TABNAME - part + part_orc p1 = . @@ -4160,7 +4160,7 @@ Path -> Partition: #### A masked pattern was here #### Partition - base file name: part + base file name: part_orc input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat properties: @@ -4170,11 +4170,11 @@ columns.comments columns.types int:string:string:string:string:int:string:double:string #### A masked pattern was here #### - name default.part + name default.part_orc numFiles 1 numRows 26 rawDataSize 16042 - serialization.ddl struct part { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} + serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde totalSize 2597 @@ -4190,25 +4190,25 @@ columns.comments columns.types int:string:string:string:string:int:string:double:string #### A masked pattern was here #### - name default.part + name default.part_orc numFiles 1 numRows 26 rawDataSize 16042 - serialization.ddl struct part { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} + serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde totalSize 2597 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.part - name: default.part + name: default.part_orc + name: default.part_orc Truncated Path -> Alias: - /part [p1] + /part_orc [p1] Execution mode: vectorized Map 4 Map Operator Tree: TableScan - alias: part + alias: part_orc Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE GatherStats: false Reduce Output Operator @@ -4224,7 +4224,7 @@ Path -> Partition: #### A masked pattern was here #### Partition - base file name: part + base file name: part_orc input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat properties: @@ -4234,11 +4234,11 @@ columns.comments columns.types int:string:string:string:string:int:string:double:string #### A masked pattern was here #### - name default.part + name default.part_orc numFiles 1 numRows 26 rawDataSize 16042 - serialization.ddl struct part { i32 p_partkey, string p_name, string p_mfgr, string p_brand, 
string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} + serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde totalSize 2597 @@ -4254,20 +4254,20 @@ columns.comments columns.types int:string:string:string:string:int:string:double:string #### A masked pattern was here #### - name default.part + name default.part_orc numFiles 1 numRows 26 rawDataSize 16042 - serialization.ddl struct part { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} + serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde totalSize 2597 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.part - name: default.part + name: default.part_orc + name: default.part_orc Truncated Path -> Alias: - /part [part] + /part_orc [part_orc] Reducer 2 Needs Tagging: false Reduce Operator Tree: @@ -4356,12 +4356,12 @@ count(abc.p_name) over (distribute by abc.p_mfgr sort by abc.p_name) as cd, abc.p_retailprice, sum(abc.p_retailprice) over (distribute by abc.p_mfgr sort by abc.p_name rows between unbounded preceding and current row) as s1, abc.p_size, abc.p_size - lag(abc.p_size,1,abc.p_size) over (distribute by abc.p_mfgr sort by abc.p_name) as deltaSz -from noop(on part +from noop(on part_orc partition by p_mfgr order by p_name -) abc join part p1 on abc.p_partkey = p1.p_partkey +) abc join part_orc p1 on abc.p_partkey = p1.p_partkey PREHOOK: type: QUERY -PREHOOK: Input: default@part +PREHOOK: Input: default@part_orc #### A masked pattern was here #### POSTHOOK: query: select abc.p_mfgr, abc.p_name, rank() over (distribute by abc.p_mfgr sort by abc.p_name) as r, @@ -4369,12 +4369,12 @@ count(abc.p_name) over (distribute by abc.p_mfgr sort by abc.p_name) as cd, abc.p_retailprice, sum(abc.p_retailprice) over (distribute by abc.p_mfgr sort by abc.p_name rows between unbounded preceding and current row) as s1, abc.p_size, abc.p_size - lag(abc.p_size,1,abc.p_size) over (distribute by abc.p_mfgr sort by abc.p_name) as deltaSz -from noop(on part +from noop(on part_orc partition by p_mfgr order by p_name -) abc join part p1 on abc.p_partkey = p1.p_partkey +) abc join part_orc p1 on abc.p_partkey = p1.p_partkey POSTHOOK: type: QUERY -POSTHOOK: Input: default@part +POSTHOOK: Input: default@part_orc #### A masked pattern was here #### Manufacturer#1 almond antique burnished rose metallic 1 1 4 1173.15 1173.15 2 0 Manufacturer#1 almond antique burnished rose metallic 1 1 4 1173.15 2346.3 2 0 @@ -4408,7 +4408,7 @@ explain extended select DISTINCT p_mfgr, p_name, p_size -from noop(on part +from noop(on part_orc partition by p_mfgr order by p_name) PREHOOK: type: QUERY @@ -4416,7 +4416,7 @@ explain extended select DISTINCT p_mfgr, p_name, p_size -from noop(on part +from noop(on part_orc partition by p_mfgr order by p_name) POSTHOOK: type: QUERY @@ -4428,7 +4428,7 @@ noop TOK_TABREF TOK_TABNAME - part + part_orc TOK_PARTITIONINGSPEC TOK_DISTRIBUTEBY TOK_TABLE_OR_COL @@ -4468,7 +4468,7 @@ Map 1 Map Operator Tree: TableScan - alias: part + alias: part_orc 
Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE GatherStats: false Reduce Output Operator @@ -4484,7 +4484,7 @@ Path -> Partition: #### A masked pattern was here #### Partition - base file name: part + base file name: part_orc input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat properties: @@ -4494,11 +4494,11 @@ columns.comments columns.types int:string:string:string:string:int:string:double:string #### A masked pattern was here #### - name default.part + name default.part_orc numFiles 1 numRows 26 rawDataSize 16042 - serialization.ddl struct part { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} + serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde totalSize 2597 @@ -4514,20 +4514,20 @@ columns.comments columns.types int:string:string:string:string:int:string:double:string #### A masked pattern was here #### - name default.part + name default.part_orc numFiles 1 numRows 26 rawDataSize 16042 - serialization.ddl struct part { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} + serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde totalSize 2597 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.part - name: default.part + name: default.part_orc + name: default.part_orc Truncated Path -> Alias: - /part [part] + /part_orc [part_orc] Reducer 2 Needs Tagging: false Reduce Operator Tree: @@ -4593,18 +4593,18 @@ ListSink PREHOOK: query: select DISTINCT p_mfgr, p_name, p_size -from noop(on part +from noop(on part_orc partition by p_mfgr order by p_name) PREHOOK: type: QUERY -PREHOOK: Input: default@part +PREHOOK: Input: default@part_orc #### A masked pattern was here #### POSTHOOK: query: select DISTINCT p_mfgr, p_name, p_size -from noop(on part +from noop(on part_orc partition by p_mfgr order by p_name) POSTHOOK: type: QUERY -POSTHOOK: Input: default@part +POSTHOOK: Input: default@part_orc #### A masked pattern was here #### Manufacturer#1 almond antique burnished rose metallic 2 Manufacturer#1 almond antique chartreuse lavender yellow 34 @@ -4635,20 +4635,20 @@ create view IF NOT EXISTS mfgr_price_view as select p_mfgr, p_brand, sum(p_retailprice) as s -from part +from part_orc group by p_mfgr, p_brand PREHOOK: type: CREATEVIEW -PREHOOK: Input: default@part +PREHOOK: Input: default@part_orc PREHOOK: Output: database:default PREHOOK: Output: default@mfgr_price_view POSTHOOK: query: -- 16. 
testViewAsTableInputToPTF create view IF NOT EXISTS mfgr_price_view as select p_mfgr, p_brand, sum(p_retailprice) as s -from part +from part_orc group by p_mfgr, p_brand POSTHOOK: type: CREATEVIEW -POSTHOOK: Input: default@part +POSTHOOK: Input: default@part_orc POSTHOOK: Output: database:default POSTHOOK: Output: default@mfgr_price_view PREHOOK: query: explain extended @@ -4740,7 +4740,7 @@ Map 1 Map Operator Tree: TableScan - alias: part + alias: part_orc Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE GatherStats: false Select Operator @@ -4766,7 +4766,7 @@ Path -> Partition: #### A masked pattern was here #### Partition - base file name: part + base file name: part_orc input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat properties: @@ -4776,11 +4776,11 @@ columns.comments columns.types int:string:string:string:string:int:string:double:string #### A masked pattern was here #### - name default.part + name default.part_orc numFiles 1 numRows 26 rawDataSize 16042 - serialization.ddl struct part { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} + serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde totalSize 2597 @@ -4796,20 +4796,20 @@ columns.comments columns.types int:string:string:string:string:int:string:double:string #### A masked pattern was here #### - name default.part + name default.part_orc numFiles 1 numRows 26 rawDataSize 16042 - serialization.ddl struct part { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} + serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde totalSize 2597 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.part - name: default.part + name: default.part_orc + name: default.part_orc Truncated Path -> Alias: - /part [part] + /part_orc [part_orc] Execution mode: vectorized Reducer 2 Needs Tagging: false @@ -4895,7 +4895,7 @@ window w1 as ( partition by p_mfgr order by p_brand rows between 2 preceding and current row) PREHOOK: type: QUERY PREHOOK: Input: default@mfgr_price_view -PREHOOK: Input: default@part +PREHOOK: Input: default@part_orc #### A masked pattern was here #### POSTHOOK: query: select p_mfgr, p_brand, s, sum(s) over w1 as s1 @@ -4905,7 +4905,7 @@ window w1 as ( partition by p_mfgr order by p_brand rows between 2 preceding and current row) POSTHOOK: type: QUERY POSTHOOK: Input: default@mfgr_price_view -POSTHOOK: Input: default@part +POSTHOOK: Input: default@part_orc #### A masked pattern was here #### Manufacturer#1 Brand#12 4800.84 4800.84 Manufacturer#1 Brand#14 2346.3 7147.14 @@ -4970,7 +4970,7 @@ POSTHOOK: Output: database:default POSTHOOK: Output: default@part_5 PREHOOK: query: explain extended -from noop(on part +from noop(on part_orc partition by p_mfgr order by p_name) INSERT OVERWRITE TABLE part_4 select p_mfgr, p_name, p_size, @@ -4986,7 +4986,7 @@ window w1 as 
(distribute by p_mfgr sort by p_mfgr, p_name rows between 2 preceding and 2 following) PREHOOK: type: QUERY POSTHOOK: query: explain extended -from noop(on part +from noop(on part_orc partition by p_mfgr order by p_name) INSERT OVERWRITE TABLE part_4 select p_mfgr, p_name, p_size, @@ -5009,7 +5009,7 @@ noop TOK_TABREF TOK_TABNAME - part + part_orc TOK_PARTITIONINGSPEC TOK_DISTRIBUTEBY TOK_TABLE_OR_COL @@ -5215,7 +5215,7 @@ Map 1 Map Operator Tree: TableScan - alias: part + alias: part_orc Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE GatherStats: false Reduce Output Operator @@ -5231,7 +5231,7 @@ Path -> Partition: #### A masked pattern was here #### Partition - base file name: part + base file name: part_orc input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat properties: @@ -5241,11 +5241,11 @@ columns.comments columns.types int:string:string:string:string:int:string:double:string #### A masked pattern was here #### - name default.part + name default.part_orc numFiles 1 numRows 26 rawDataSize 16042 - serialization.ddl struct part { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} + serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde totalSize 2597 @@ -5261,20 +5261,20 @@ columns.comments columns.types int:string:string:string:string:int:string:double:string #### A masked pattern was here #### - name default.part + name default.part_orc numFiles 1 numRows 26 rawDataSize 16042 - serialization.ddl struct part { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} + serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde totalSize 2597 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.part - name: default.part + name: default.part_orc + name: default.part_orc Truncated Path -> Alias: - /part [part] + /part_orc [part_orc] Reducer 2 Needs Tagging: false Reduce Operator Tree: @@ -5447,7 +5447,7 @@ Stats-Aggr Operator #### A masked pattern was here #### -PREHOOK: query: from noop(on part +PREHOOK: query: from noop(on part_orc partition by p_mfgr order by p_name) INSERT OVERWRITE TABLE part_4 select p_mfgr, p_name, p_size, @@ -5462,10 +5462,10 @@ first_value(p_size, true) over w1 as fv1 window w1 as (distribute by p_mfgr sort by p_mfgr, p_name rows between 2 preceding and 2 following) PREHOOK: type: QUERY -PREHOOK: Input: default@part +PREHOOK: Input: default@part_orc PREHOOK: Output: default@part_4 PREHOOK: Output: default@part_5 -POSTHOOK: query: from noop(on part +POSTHOOK: query: from noop(on part_orc partition by p_mfgr order by p_name) INSERT OVERWRITE TABLE part_4 select p_mfgr, p_name, p_size, @@ -5480,23 +5480,23 @@ first_value(p_size, true) over w1 as fv1 window w1 as (distribute by p_mfgr sort by p_mfgr, p_name rows between 2 preceding and 2 following) POSTHOOK: type: QUERY -POSTHOOK: Input: default@part +POSTHOOK: 
Input: default@part_orc POSTHOOK: Output: default@part_4 POSTHOOK: Output: default@part_5 -POSTHOOK: Lineage: part_4.dr SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part)part.FieldSchema(name:ROW__ID, type:struct, comment:), ] -POSTHOOK: Lineage: part_4.p_mfgr SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part)part.FieldSchema(name:ROW__ID, type:struct, comment:), ] -POSTHOOK: Lineage: part_4.p_name SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part)part.FieldSchema(name:ROW__ID, type:struct, comment:), ] -POSTHOOK: Lineage: part_4.p_size SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part)part.FieldSchema(name:ROW__ID, type:struct, comment:), ] -POSTHOOK: Lineage: part_4.r SCRIPT [(part)part.FieldSchema(name:p_partkey, 
type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part)part.FieldSchema(name:ROW__ID, type:struct, comment:), ] -POSTHOOK: Lineage: part_4.s SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part)part.FieldSchema(name:ROW__ID, type:struct, comment:), ] -POSTHOOK: Lineage: part_5.cud SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part)part.FieldSchema(name:ROW__ID, type:struct, comment:), ] -POSTHOOK: Lineage: part_5.dr SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part)part.FieldSchema(name:ROW__ID, type:struct, comment:), ] -POSTHOOK: Lineage: part_5.fv1 SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), 
(part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part)part.FieldSchema(name:ROW__ID, type:struct, comment:), ] -POSTHOOK: Lineage: part_5.p_mfgr SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part)part.FieldSchema(name:ROW__ID, type:struct, comment:), ] -POSTHOOK: Lineage: part_5.p_name SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part)part.FieldSchema(name:ROW__ID, type:struct, comment:), ] -POSTHOOK: Lineage: part_5.p_size SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part)part.FieldSchema(name:ROW__ID, type:struct, comment:), ] -POSTHOOK: Lineage: part_5.r SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, 
comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part)part.FieldSchema(name:ROW__ID, type:struct, comment:), ] -POSTHOOK: Lineage: part_5.s2 SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part)part.FieldSchema(name:ROW__ID, type:struct, comment:), ] +POSTHOOK: Lineage: part_4.dr SCRIPT [(part_orc)part_orc.FieldSchema(name:p_partkey, type:int, comment:null), (part_orc)part_orc.FieldSchema(name:p_name, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_mfgr, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_brand, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_type, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_size, type:int, comment:null), (part_orc)part_orc.FieldSchema(name:p_container, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_retailprice, type:double, comment:null), (part_orc)part_orc.FieldSchema(name:p_comment, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part_orc)part_orc.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part_orc)part_orc.FieldSchema(name:ROW__ID, type:struct, comment:), ] +POSTHOOK: Lineage: part_4.p_mfgr SCRIPT [(part_orc)part_orc.FieldSchema(name:p_partkey, type:int, comment:null), (part_orc)part_orc.FieldSchema(name:p_name, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_mfgr, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_brand, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_type, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_size, type:int, comment:null), (part_orc)part_orc.FieldSchema(name:p_container, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_retailprice, type:double, comment:null), (part_orc)part_orc.FieldSchema(name:p_comment, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part_orc)part_orc.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part_orc)part_orc.FieldSchema(name:ROW__ID, type:struct, comment:), ] +POSTHOOK: Lineage: part_4.p_name SCRIPT [(part_orc)part_orc.FieldSchema(name:p_partkey, type:int, comment:null), (part_orc)part_orc.FieldSchema(name:p_name, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_mfgr, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_brand, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_type, type:string, 
comment:null), (part_orc)part_orc.FieldSchema(name:p_size, type:int, comment:null), (part_orc)part_orc.FieldSchema(name:p_container, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_retailprice, type:double, comment:null), (part_orc)part_orc.FieldSchema(name:p_comment, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part_orc)part_orc.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part_orc)part_orc.FieldSchema(name:ROW__ID, type:struct, comment:), ] +POSTHOOK: Lineage: part_4.p_size SCRIPT [(part_orc)part_orc.FieldSchema(name:p_partkey, type:int, comment:null), (part_orc)part_orc.FieldSchema(name:p_name, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_mfgr, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_brand, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_type, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_size, type:int, comment:null), (part_orc)part_orc.FieldSchema(name:p_container, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_retailprice, type:double, comment:null), (part_orc)part_orc.FieldSchema(name:p_comment, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part_orc)part_orc.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part_orc)part_orc.FieldSchema(name:ROW__ID, type:struct, comment:), ] +POSTHOOK: Lineage: part_4.r SCRIPT [(part_orc)part_orc.FieldSchema(name:p_partkey, type:int, comment:null), (part_orc)part_orc.FieldSchema(name:p_name, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_mfgr, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_brand, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_type, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_size, type:int, comment:null), (part_orc)part_orc.FieldSchema(name:p_container, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_retailprice, type:double, comment:null), (part_orc)part_orc.FieldSchema(name:p_comment, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part_orc)part_orc.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part_orc)part_orc.FieldSchema(name:ROW__ID, type:struct, comment:), ] +POSTHOOK: Lineage: part_4.s SCRIPT [(part_orc)part_orc.FieldSchema(name:p_partkey, type:int, comment:null), (part_orc)part_orc.FieldSchema(name:p_name, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_mfgr, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_brand, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_type, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_size, type:int, comment:null), (part_orc)part_orc.FieldSchema(name:p_container, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_retailprice, type:double, comment:null), (part_orc)part_orc.FieldSchema(name:p_comment, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part_orc)part_orc.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part_orc)part_orc.FieldSchema(name:ROW__ID, type:struct, comment:), ] +POSTHOOK: Lineage: part_5.cud SCRIPT [(part_orc)part_orc.FieldSchema(name:p_partkey, type:int, comment:null), (part_orc)part_orc.FieldSchema(name:p_name, type:string, comment:null), 
(part_orc)part_orc.FieldSchema(name:p_mfgr, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_brand, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_type, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_size, type:int, comment:null), (part_orc)part_orc.FieldSchema(name:p_container, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_retailprice, type:double, comment:null), (part_orc)part_orc.FieldSchema(name:p_comment, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part_orc)part_orc.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part_orc)part_orc.FieldSchema(name:ROW__ID, type:struct, comment:), ] +POSTHOOK: Lineage: part_5.dr SCRIPT [(part_orc)part_orc.FieldSchema(name:p_partkey, type:int, comment:null), (part_orc)part_orc.FieldSchema(name:p_name, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_mfgr, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_brand, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_type, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_size, type:int, comment:null), (part_orc)part_orc.FieldSchema(name:p_container, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_retailprice, type:double, comment:null), (part_orc)part_orc.FieldSchema(name:p_comment, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part_orc)part_orc.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part_orc)part_orc.FieldSchema(name:ROW__ID, type:struct, comment:), ] +POSTHOOK: Lineage: part_5.fv1 SCRIPT [(part_orc)part_orc.FieldSchema(name:p_partkey, type:int, comment:null), (part_orc)part_orc.FieldSchema(name:p_name, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_mfgr, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_brand, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_type, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_size, type:int, comment:null), (part_orc)part_orc.FieldSchema(name:p_container, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_retailprice, type:double, comment:null), (part_orc)part_orc.FieldSchema(name:p_comment, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part_orc)part_orc.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part_orc)part_orc.FieldSchema(name:ROW__ID, type:struct, comment:), ] +POSTHOOK: Lineage: part_5.p_mfgr SCRIPT [(part_orc)part_orc.FieldSchema(name:p_partkey, type:int, comment:null), (part_orc)part_orc.FieldSchema(name:p_name, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_mfgr, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_brand, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_type, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_size, type:int, comment:null), (part_orc)part_orc.FieldSchema(name:p_container, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_retailprice, type:double, comment:null), (part_orc)part_orc.FieldSchema(name:p_comment, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part_orc)part_orc.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part_orc)part_orc.FieldSchema(name:ROW__ID, 
type:struct, comment:), ] +POSTHOOK: Lineage: part_5.p_name SCRIPT [(part_orc)part_orc.FieldSchema(name:p_partkey, type:int, comment:null), (part_orc)part_orc.FieldSchema(name:p_name, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_mfgr, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_brand, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_type, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_size, type:int, comment:null), (part_orc)part_orc.FieldSchema(name:p_container, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_retailprice, type:double, comment:null), (part_orc)part_orc.FieldSchema(name:p_comment, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part_orc)part_orc.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part_orc)part_orc.FieldSchema(name:ROW__ID, type:struct, comment:), ] +POSTHOOK: Lineage: part_5.p_size SCRIPT [(part_orc)part_orc.FieldSchema(name:p_partkey, type:int, comment:null), (part_orc)part_orc.FieldSchema(name:p_name, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_mfgr, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_brand, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_type, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_size, type:int, comment:null), (part_orc)part_orc.FieldSchema(name:p_container, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_retailprice, type:double, comment:null), (part_orc)part_orc.FieldSchema(name:p_comment, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part_orc)part_orc.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part_orc)part_orc.FieldSchema(name:ROW__ID, type:struct, comment:), ] +POSTHOOK: Lineage: part_5.r SCRIPT [(part_orc)part_orc.FieldSchema(name:p_partkey, type:int, comment:null), (part_orc)part_orc.FieldSchema(name:p_name, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_mfgr, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_brand, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_type, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_size, type:int, comment:null), (part_orc)part_orc.FieldSchema(name:p_container, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_retailprice, type:double, comment:null), (part_orc)part_orc.FieldSchema(name:p_comment, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part_orc)part_orc.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part_orc)part_orc.FieldSchema(name:ROW__ID, type:struct, comment:), ] +POSTHOOK: Lineage: part_5.s2 SCRIPT [(part_orc)part_orc.FieldSchema(name:p_partkey, type:int, comment:null), (part_orc)part_orc.FieldSchema(name:p_name, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_mfgr, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_brand, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_type, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_size, type:int, comment:null), (part_orc)part_orc.FieldSchema(name:p_container, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_retailprice, type:double, comment:null), (part_orc)part_orc.FieldSchema(name:p_comment, type:string, comment:null), 
(part_orc)part_orc.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part_orc)part_orc.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part_orc)part_orc.FieldSchema(name:ROW__ID, type:struct, comment:), ] PREHOOK: query: select * from part_4 PREHOOK: type: QUERY PREHOOK: Input: default@part_4 @@ -5575,7 +5575,7 @@ from noop(on noopwithmap(on noop(on - noop(on part + noop(on part_orc partition by p_mfgr order by p_mfgr) ) @@ -5594,7 +5594,7 @@ from noop(on noopwithmap(on noop(on - noop(on part + noop(on part_orc partition by p_mfgr order by p_mfgr) ) @@ -5617,7 +5617,7 @@ noop TOK_TABREF TOK_TABNAME - part + part_orc TOK_PARTITIONINGSPEC TOK_DISTRIBUTEBY TOK_TABLE_OR_COL @@ -5723,7 +5723,7 @@ Map 1 Map Operator Tree: TableScan - alias: part + alias: part_orc Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE GatherStats: false Reduce Output Operator @@ -5739,7 +5739,7 @@ Path -> Partition: #### A masked pattern was here #### Partition - base file name: part + base file name: part_orc input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat properties: @@ -5749,11 +5749,11 @@ columns.comments columns.types int:string:string:string:string:int:string:double:string #### A masked pattern was here #### - name default.part + name default.part_orc numFiles 1 numRows 26 rawDataSize 16042 - serialization.ddl struct part { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} + serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde totalSize 2597 @@ -5769,20 +5769,20 @@ columns.comments columns.types int:string:string:string:string:int:string:double:string #### A masked pattern was here #### - name default.part + name default.part_orc numFiles 1 numRows 26 rawDataSize 16042 - serialization.ddl struct part { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} + serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde totalSize 2597 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.part - name: default.part + name: default.part_orc + name: default.part_orc Truncated Path -> Alias: - /part [part] + /part_orc [part_orc] Reducer 2 Needs Tagging: false Reduce Operator Tree: @@ -5865,7 +5865,7 @@ from noop(on noopwithmap(on noop(on - noop(on part + noop(on part_orc partition by p_mfgr order by p_mfgr) ) @@ -5874,7 +5874,7 @@ partition by p_mfgr,p_name order by p_mfgr,p_name) PREHOOK: type: QUERY -PREHOOK: Input: default@part +PREHOOK: Input: default@part_orc #### A masked pattern was here #### POSTHOOK: query: select p_mfgr, p_name, rank() over (partition by p_mfgr,p_name) as r, @@ -5883,7 +5883,7 @@ from noop(on noopwithmap(on noop(on - noop(on part + noop(on part_orc partition by p_mfgr order by p_mfgr) ) @@ -5892,7 +5892,7 @@ partition by p_mfgr,p_name order by p_mfgr,p_name) POSTHOOK: type: QUERY -POSTHOOK: Input: 
default@part +POSTHOOK: Input: default@part_orc #### A masked pattern was here #### Manufacturer#1 almond antique burnished rose metallic 1 1 2 2 Manufacturer#1 almond antique burnished rose metallic 1 1 2 4 @@ -5930,7 +5930,7 @@ from noop(on noop(on noop(on - noop(on part + noop(on part_orc partition by p_mfgr order by p_mfgr) ) @@ -5949,7 +5949,7 @@ from noop(on noop(on noop(on - noop(on part + noop(on part_orc partition by p_mfgr order by p_mfgr) ) @@ -5972,7 +5972,7 @@ noop TOK_TABREF TOK_TABNAME - part + part_orc TOK_PARTITIONINGSPEC TOK_DISTRIBUTEBY TOK_TABLE_OR_COL @@ -6080,7 +6080,7 @@ Map 1 Map Operator Tree: TableScan - alias: part + alias: part_orc Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE GatherStats: false Reduce Output Operator @@ -6096,7 +6096,7 @@ Path -> Partition: #### A masked pattern was here #### Partition - base file name: part + base file name: part_orc input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat properties: @@ -6106,11 +6106,11 @@ columns.comments columns.types int:string:string:string:string:int:string:double:string #### A masked pattern was here #### - name default.part + name default.part_orc numFiles 1 numRows 26 rawDataSize 16042 - serialization.ddl struct part { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} + serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde totalSize 2597 @@ -6126,20 +6126,20 @@ columns.comments columns.types int:string:string:string:string:int:string:double:string #### A masked pattern was here #### - name default.part + name default.part_orc numFiles 1 numRows 26 rawDataSize 16042 - serialization.ddl struct part { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} + serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde totalSize 2597 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.part - name: default.part + name: default.part_orc + name: default.part_orc Truncated Path -> Alias: - /part [part] + /part_orc [part_orc] Reducer 2 Needs Tagging: false Reduce Operator Tree: @@ -6235,7 +6235,7 @@ from noop(on noop(on noop(on - noop(on part + noop(on part_orc partition by p_mfgr order by p_mfgr) ) @@ -6244,7 +6244,7 @@ partition by p_mfgr order by p_mfgr ) PREHOOK: type: QUERY -PREHOOK: Input: default@part +PREHOOK: Input: default@part_orc #### A masked pattern was here #### POSTHOOK: query: select p_mfgr, p_name, rank() over (partition by p_mfgr order by p_name) as r, @@ -6253,7 +6253,7 @@ from noop(on noop(on noop(on - noop(on part + noop(on part_orc partition by p_mfgr order by p_mfgr) ) @@ -6262,7 +6262,7 @@ partition by p_mfgr order by p_mfgr ) POSTHOOK: type: QUERY -POSTHOOK: Input: default@part +POSTHOOK: Input: default@part_orc #### A masked pattern was here #### Manufacturer#1 almond antique burnished rose metallic 1 1 2 2 Manufacturer#1 almond 
antique burnished rose metallic 1 1 2 4 @@ -6300,7 +6300,7 @@ from noop(on noop(on noop(on - noop(on part + noop(on part_orc partition by p_mfgr,p_name order by p_mfgr,p_name) ) @@ -6317,7 +6317,7 @@ from noop(on noop(on noop(on - noop(on part + noop(on part_orc partition by p_mfgr,p_name order by p_mfgr,p_name) ) @@ -6338,7 +6338,7 @@ noop TOK_TABREF TOK_TABNAME - part + part_orc TOK_PARTITIONINGSPEC TOK_DISTRIBUTEBY TOK_TABLE_OR_COL @@ -6433,7 +6433,7 @@ Map 1 Map Operator Tree: TableScan - alias: part + alias: part_orc Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE GatherStats: false Reduce Output Operator @@ -6449,7 +6449,7 @@ Path -> Partition: #### A masked pattern was here #### Partition - base file name: part + base file name: part_orc input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat properties: @@ -6459,11 +6459,11 @@ columns.comments columns.types int:string:string:string:string:int:string:double:string #### A masked pattern was here #### - name default.part + name default.part_orc numFiles 1 numRows 26 rawDataSize 16042 - serialization.ddl struct part { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} + serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde totalSize 2597 @@ -6479,20 +6479,20 @@ columns.comments columns.types int:string:string:string:string:int:string:double:string #### A masked pattern was here #### - name default.part + name default.part_orc numFiles 1 numRows 26 rawDataSize 16042 - serialization.ddl struct part { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} + serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde totalSize 2597 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.part - name: default.part + name: default.part_orc + name: default.part_orc Truncated Path -> Alias: - /part [part] + /part_orc [part_orc] Reducer 2 Needs Tagging: false Reduce Operator Tree: @@ -6573,14 +6573,14 @@ from noop(on noop(on noop(on - noop(on part + noop(on part_orc partition by p_mfgr,p_name order by p_mfgr,p_name) ) partition by p_mfgr order by p_mfgr)) PREHOOK: type: QUERY -PREHOOK: Input: default@part +PREHOOK: Input: default@part_orc #### A masked pattern was here #### POSTHOOK: query: select p_mfgr, p_name, rank() over (partition by p_mfgr order by p_name) as r, @@ -6589,14 +6589,14 @@ from noop(on noop(on noop(on - noop(on part + noop(on part_orc partition by p_mfgr,p_name order by p_mfgr,p_name) ) partition by p_mfgr order by p_mfgr)) POSTHOOK: type: QUERY -POSTHOOK: Input: default@part +POSTHOOK: Input: default@part_orc #### A masked pattern was here #### Manufacturer#1 almond antique burnished rose metallic 1 1 2 4 Manufacturer#1 almond antique burnished rose metallic 1 1 2 4 @@ -6634,7 +6634,7 @@ from noopwithmap(on noop(on noop(on - noop(on part + noop(on part_orc partition by 
p_mfgr,p_name order by p_mfgr,p_name) ) @@ -6653,7 +6653,7 @@ from noopwithmap(on noop(on noop(on - noop(on part + noop(on part_orc partition by p_mfgr,p_name order by p_mfgr,p_name) ) @@ -6676,7 +6676,7 @@ noop TOK_TABREF TOK_TABNAME - part + part_orc TOK_PARTITIONINGSPEC TOK_DISTRIBUTEBY TOK_TABLE_OR_COL @@ -6783,7 +6783,7 @@ Map 1 Map Operator Tree: TableScan - alias: part + alias: part_orc Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE GatherStats: false Reduce Output Operator @@ -6799,7 +6799,7 @@ Path -> Partition: #### A masked pattern was here #### Partition - base file name: part + base file name: part_orc input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat properties: @@ -6809,11 +6809,11 @@ columns.comments columns.types int:string:string:string:string:int:string:double:string #### A masked pattern was here #### - name default.part + name default.part_orc numFiles 1 numRows 26 rawDataSize 16042 - serialization.ddl struct part { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} + serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde totalSize 2597 @@ -6829,20 +6829,20 @@ columns.comments columns.types int:string:string:string:string:int:string:double:string #### A masked pattern was here #### - name default.part + name default.part_orc numFiles 1 numRows 26 rawDataSize 16042 - serialization.ddl struct part { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} + serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde totalSize 2597 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.part - name: default.part + name: default.part_orc + name: default.part_orc Truncated Path -> Alias: - /part [part] + /part_orc [part_orc] Reducer 2 Needs Tagging: false Reduce Operator Tree: @@ -6940,7 +6940,7 @@ from noopwithmap(on noop(on noop(on - noop(on part + noop(on part_orc partition by p_mfgr,p_name order by p_mfgr,p_name) ) @@ -6949,7 +6949,7 @@ partition by p_mfgr,p_name order by p_mfgr,p_name) PREHOOK: type: QUERY -PREHOOK: Input: default@part +PREHOOK: Input: default@part_orc #### A masked pattern was here #### POSTHOOK: query: select p_mfgr, p_name, rank() over (partition by p_mfgr,p_name) as r, @@ -6958,7 +6958,7 @@ from noopwithmap(on noop(on noop(on - noop(on part + noop(on part_orc partition by p_mfgr,p_name order by p_mfgr,p_name) ) @@ -6967,7 +6967,7 @@ partition by p_mfgr,p_name order by p_mfgr,p_name) POSTHOOK: type: QUERY -POSTHOOK: Input: default@part +POSTHOOK: Input: default@part_orc #### A masked pattern was here #### Manufacturer#1 almond antique burnished rose metallic 1 1 2 2 Manufacturer#1 almond antique burnished rose metallic 1 1 2 4 @@ -7006,7 +7006,7 @@ sum(p_size) over (partition by p_mfgr,p_name order by p_mfgr,p_name rows between unbounded preceding and current row) as s2 from noop(on noopwithmap(on - 
noop(on part + noop(on part_orc partition by p_mfgr, p_name order by p_mfgr, p_name) partition by p_mfgr @@ -7024,7 +7024,7 @@ sum(p_size) over (partition by p_mfgr,p_name order by p_mfgr,p_name rows between unbounded preceding and current row) as s2 from noop(on noopwithmap(on - noop(on part + noop(on part_orc partition by p_mfgr, p_name order by p_mfgr, p_name) partition by p_mfgr @@ -7043,7 +7043,7 @@ noop TOK_TABREF TOK_TABNAME - part + part_orc TOK_PARTITIONINGSPEC TOK_DISTRIBUTEBY TOK_TABLE_OR_COL @@ -7181,7 +7181,7 @@ Map 1 Map Operator Tree: TableScan - alias: part + alias: part_orc Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE GatherStats: false Reduce Output Operator @@ -7197,7 +7197,7 @@ Path -> Partition: #### A masked pattern was here #### Partition - base file name: part + base file name: part_orc input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat properties: @@ -7207,11 +7207,11 @@ columns.comments columns.types int:string:string:string:string:int:string:double:string #### A masked pattern was here #### - name default.part + name default.part_orc numFiles 1 numRows 26 rawDataSize 16042 - serialization.ddl struct part { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} + serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde totalSize 2597 @@ -7227,20 +7227,20 @@ columns.comments columns.types int:string:string:string:string:int:string:double:string #### A masked pattern was here #### - name default.part + name default.part_orc numFiles 1 numRows 26 rawDataSize 16042 - serialization.ddl struct part { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} + serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde totalSize 2597 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.part - name: default.part + name: default.part_orc + name: default.part_orc Truncated Path -> Alias: - /part [part] + /part_orc [part_orc] Reducer 2 Needs Tagging: false Reduce Operator Tree: @@ -7324,14 +7324,14 @@ sum(p_size) over (partition by p_mfgr,p_name order by p_mfgr,p_name rows between unbounded preceding and current row) as s2 from noop(on noopwithmap(on - noop(on part + noop(on part_orc partition by p_mfgr, p_name order by p_mfgr, p_name) partition by p_mfgr order by p_mfgr )) PREHOOK: type: QUERY -PREHOOK: Input: default@part +PREHOOK: Input: default@part_orc #### A masked pattern was here #### POSTHOOK: query: select p_mfgr, p_name, rank() over (partition by p_mfgr,p_name order by p_mfgr,p_name) as r, @@ -7341,14 +7341,14 @@ sum(p_size) over (partition by p_mfgr,p_name order by p_mfgr,p_name rows between unbounded preceding and current row) as s2 from noop(on noopwithmap(on - noop(on part + noop(on part_orc partition by p_mfgr, p_name order by p_mfgr, p_name) partition by p_mfgr order by p_mfgr )) POSTHOOK: type: QUERY -POSTHOOK: 
Input: default@part +POSTHOOK: Input: default@part_orc #### A masked pattern was here #### Manufacturer#1 almond antique burnished rose metallic 1 1 2 2 2 Manufacturer#1 almond antique burnished rose metallic 1 1 2 4 4 @@ -7387,7 +7387,7 @@ sum(p_size) over (partition by p_mfgr order by p_name range between unbounded preceding and current row) as s2 from noopwithmap(on noop(on - noop(on part + noop(on part_orc partition by p_mfgr, p_name order by p_mfgr, p_name) )) @@ -7403,7 +7403,7 @@ sum(p_size) over (partition by p_mfgr order by p_name range between unbounded preceding and current row) as s2 from noopwithmap(on noop(on - noop(on part + noop(on part_orc partition by p_mfgr, p_name order by p_mfgr, p_name) )) @@ -7420,7 +7420,7 @@ noop TOK_TABREF TOK_TABNAME - part + part_orc TOK_PARTITIONINGSPEC TOK_DISTRIBUTEBY TOK_TABLE_OR_COL @@ -7530,7 +7530,7 @@ Map 1 Map Operator Tree: TableScan - alias: part + alias: part_orc Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE GatherStats: false Reduce Output Operator @@ -7546,7 +7546,7 @@ Path -> Partition: #### A masked pattern was here #### Partition - base file name: part + base file name: part_orc input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat properties: @@ -7556,11 +7556,11 @@ columns.comments columns.types int:string:string:string:string:int:string:double:string #### A masked pattern was here #### - name default.part + name default.part_orc numFiles 1 numRows 26 rawDataSize 16042 - serialization.ddl struct part { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} + serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde totalSize 2597 @@ -7576,20 +7576,20 @@ columns.comments columns.types int:string:string:string:string:int:string:double:string #### A masked pattern was here #### - name default.part + name default.part_orc numFiles 1 numRows 26 rawDataSize 16042 - serialization.ddl struct part { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} + serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde totalSize 2597 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.part - name: default.part + name: default.part_orc + name: default.part_orc Truncated Path -> Alias: - /part [part] + /part_orc [part_orc] Reducer 2 Needs Tagging: false Reduce Operator Tree: @@ -7673,12 +7673,12 @@ sum(p_size) over (partition by p_mfgr order by p_name range between unbounded preceding and current row) as s2 from noopwithmap(on noop(on - noop(on part + noop(on part_orc partition by p_mfgr, p_name order by p_mfgr, p_name) )) PREHOOK: type: QUERY -PREHOOK: Input: default@part +PREHOOK: Input: default@part_orc #### A masked pattern was here #### POSTHOOK: query: select p_mfgr, p_name, rank() over (partition by p_mfgr order by p_name) as r, @@ -7688,12 +7688,12 @@ sum(p_size) over (partition by p_mfgr 
order by p_name range between unbounded preceding and current row) as s2 from noopwithmap(on noop(on - noop(on part + noop(on part_orc partition by p_mfgr, p_name order by p_mfgr, p_name) )) POSTHOOK: type: QUERY -POSTHOOK: Input: default@part +POSTHOOK: Input: default@part_orc #### A masked pattern was here #### Manufacturer#1 almond antique burnished rose metallic 1 1 2 4 4 Manufacturer#1 almond antique burnished rose metallic 1 1 2 4 4 Index: ql/src/test/results/clientpositive/udf_explode.q.out =================================================================== --- ql/src/test/results/clientpositive/udf_explode.q.out (revision 1637277) +++ ql/src/test/results/clientpositive/udf_explode.q.out (working copy) @@ -282,17 +282,17 @@ keys: KEY._col0 (type: int) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: int), _col1 (type: bigint) outputColumnNames: _col0, _col1 - Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false GlobalTableId: 0 #### A masked pattern was here #### NumFilesPerFileSink: 1 - Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE #### A masked pattern was here #### table: input format: org.apache.hadoop.mapred.TextInputFormat @@ -639,17 +639,17 @@ keys: KEY._col0 (type: int), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: int), _col1 (type: string), _col2 (type: bigint) outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false GlobalTableId: 0 #### A masked pattern was here #### NumFilesPerFileSink: 1 - Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE #### A masked pattern was here #### table: input format: org.apache.hadoop.mapred.TextInputFormat Index: ql/src/test/results/clientpositive/udtf_explode.q.out =================================================================== --- ql/src/test/results/clientpositive/udtf_explode.q.out (revision 1637277) +++ ql/src/test/results/clientpositive/udtf_explode.q.out (working copy) @@ -348,17 +348,17 @@ keys: KEY._col0 (type: int) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 3 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: int), _col1 (type: bigint) outputColumnNames: _col0, _col1 - Statistics: Num rows: 3 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false GlobalTableId: 0 #### A masked pattern was here #### 
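The Statistics rewrites in udf_explode.q.out and udtf_explode.q.out replace the earlier aggregate-output estimates (Num rows: 1 or 3, Data size: 16 or 48) with the smaller values now derived when complete column statistics are available for the group-by output. A minimal HiveQL sketch of the query shape these golden files exercise, assuming the usual explode-over-a-literal-array pattern; it is illustrative only, since the exact statements live in udf_explode.q and udtf_explode.q, which this diff does not touch:

    EXPLAIN
    SELECT t.col, count(1)
    FROM (SELECT explode(array(1, 2, 3)) AS col FROM src) t
    GROUP BY t.col;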
NumFilesPerFileSink: 1 - Statistics: Num rows: 3 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE #### A masked pattern was here #### table: input format: org.apache.hadoop.mapred.TextInputFormat @@ -678,17 +678,17 @@ keys: KEY._col0 (type: int), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 3 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: int), _col1 (type: string), _col2 (type: bigint) outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 3 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false GlobalTableId: 0 #### A masked pattern was here #### NumFilesPerFileSink: 1 - Statistics: Num rows: 3 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE #### A masked pattern was here #### table: input format: org.apache.hadoop.mapred.TextInputFormat Index: ql/src/test/results/clientpositive/udtf_stack.q.out =================================================================== --- ql/src/test/results/clientpositive/udtf_stack.q.out (revision 1637277) +++ ql/src/test/results/clientpositive/udtf_stack.q.out (working copy) @@ -21,10 +21,10 @@ Lateral View Forward Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - Statistics: Num rows: 500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 134000 Basic stats: COMPLETE Column stats: COMPLETE Lateral View Join Operator outputColumnNames: _col5, _col6 - Statistics: Num rows: 1000 Data size: 111000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 245000 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col5 (type: string), _col6 (type: array) outputColumnNames: _col0, _col1 @@ -48,7 +48,7 @@ function name: stack Lateral View Join Operator outputColumnNames: _col5, _col6 - Statistics: Num rows: 1000 Data size: 111000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 245000 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col5 (type: string), _col6 (type: array) outputColumnNames: _col0, _col1 @@ -88,10 +88,10 @@ Lateral View Forward Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - Statistics: Num rows: 500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 134000 Basic stats: COMPLETE Column stats: COMPLETE Lateral View Join Operator outputColumnNames: _col5, _col6 - Statistics: Num rows: 1000 Data size: 135000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 269000 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col5 (type: string), _col6 (type: array) outputColumnNames: _col0, _col1 @@ -115,7 +115,7 @@ function name: stack Lateral View Join Operator outputColumnNames: _col5, _col6 - Statistics: Num rows: 1000 Data size: 135000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 269000 Basic stats: COMPLETE Column stats: COMPLETE Select 
Operator expressions: _col5 (type: string), _col6 (type: array) outputColumnNames: _col0, _col1 Index: ql/src/test/results/clientpositive/union11.q.out =================================================================== --- ql/src/test/results/clientpositive/union11.q.out (revision 1637277) +++ ql/src/test/results/clientpositive/union11.q.out (working copy) @@ -126,14 +126,14 @@ keys: KEY._col0 (type: string) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: string), _col1 (type: bigint) outputColumnNames: _col0, _col1 - Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat Index: ql/src/test/results/clientpositive/union14.q.out =================================================================== --- ql/src/test/results/clientpositive/union14.q.out (revision 1637277) +++ ql/src/test/results/clientpositive/union14.q.out (working copy) @@ -109,14 +109,14 @@ keys: KEY._col0 (type: string) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: string), _col1 (type: bigint) outputColumnNames: _col0, _col1 - Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat Index: ql/src/test/results/clientpositive/union15.q.out =================================================================== --- ql/src/test/results/clientpositive/union15.q.out (revision 1637277) +++ ql/src/test/results/clientpositive/union15.q.out (working copy) @@ -136,14 +136,14 @@ keys: KEY._col0 (type: string) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: PARTIAL Select Operator expressions: _col0 (type: string), _col1 (type: bigint) outputColumnNames: _col0, _col1 - Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: PARTIAL File Output Operator compressed: false - Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: PARTIAL table: input format: org.apache.hadoop.mapred.TextInputFormat output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat Index: ql/src/test/results/clientpositive/union17.q.out =================================================================== --- ql/src/test/results/clientpositive/union17.q.out (revision 1637277) +++ ql/src/test/results/clientpositive/union17.q.out (working copy) @@ -114,7 +114,7 @@ keys: VALUE._col0 (type: string) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 250 Data size: 48000 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: PARTIAL File Output Operator compressed: false table: @@ -126,7 +126,7 @@ keys: VALUE._col0 (type: string), VALUE._col1 (type: string) mode: hash outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 250 Data size: 94000 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 1 Data size: 280 Basic stats: COMPLETE Column stats: PARTIAL File Output Operator compressed: false table: @@ -142,7 +142,7 @@ key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 250 Data size: 48000 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: PARTIAL value expressions: _col1 (type: bigint) Reduce Operator Tree: Group By Operator @@ -150,14 +150,14 @@ keys: KEY._col0 (type: string) mode: final outputColumnNames: _col0, _col1 - Statistics: Num rows: 250 Data size: 48000 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: PARTIAL Select Operator expressions: _col0 (type: string), _col1 (type: bigint) outputColumnNames: _col0, _col1 - Statistics: Num rows: 250 Data size: 48000 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: PARTIAL File Output Operator compressed: false - Statistics: Num rows: 250 Data size: 48000 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: PARTIAL table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -185,7 +185,7 @@ key expressions: _col0 (type: string), _col1 (type: string) sort order: ++ Map-reduce partition columns: _col0 (type: string), _col1 (type: string) - Statistics: Num rows: 250 Data size: 94000 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 1 Data size: 280 Basic stats: COMPLETE Column stats: PARTIAL value expressions: _col2 (type: bigint) Reduce Operator Tree: Group By Operator @@ -193,14 +193,14 @@ keys: KEY._col0 (type: string), KEY._col1 (type: string) mode: final outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 250 Data size: 94000 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 1 Data size: 280 Basic stats: COMPLETE Column stats: PARTIAL Select Operator expressions: _col0 (type: string), _col1 (type: string), _col2 (type: bigint) outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 250 Data size: 94000 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 1 Data size: 280 Basic stats: COMPLETE Column stats: PARTIAL File Output Operator compressed: false - Statistics: Num rows: 250 Data size: 94000 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 1 Data size: 280 Basic stats: COMPLETE Column stats: PARTIAL table: input format: 
org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat Index: ql/src/test/results/clientpositive/union19.q.out =================================================================== --- ql/src/test/results/clientpositive/union19.q.out (revision 1637277) +++ ql/src/test/results/clientpositive/union19.q.out (working copy) @@ -152,14 +152,14 @@ keys: KEY._col0 (type: string) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: PARTIAL Select Operator expressions: _col0 (type: string), _col1 (type: bigint) outputColumnNames: _col0, _col1 - Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: PARTIAL File Output Operator compressed: false - Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: PARTIAL table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat Index: ql/src/test/results/clientpositive/union21.q.out =================================================================== --- ql/src/test/results/clientpositive/union21.q.out (revision 1637277) +++ ql/src/test/results/clientpositive/union21.q.out (working copy) @@ -173,14 +173,14 @@ keys: KEY._col0 (type: string) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 1 Data size: 93 Basic stats: COMPLETE Column stats: PARTIAL Select Operator expressions: _col0 (type: string), _col1 (type: bigint) outputColumnNames: _col0, _col1 - Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 1 Data size: 93 Basic stats: COMPLETE Column stats: PARTIAL File Output Operator compressed: false - Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 1 Data size: 93 Basic stats: COMPLETE Column stats: PARTIAL table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat Index: ql/src/test/results/clientpositive/union26.q.out =================================================================== --- ql/src/test/results/clientpositive/union26.q.out (revision 1637277) +++ ql/src/test/results/clientpositive/union26.q.out (working copy) @@ -103,91 +103,88 @@ Map Operator Tree: TableScan alias: srcpart - Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE - Filter Operator - predicate: ((ds = '2008-04-08') and (hr = '11')) (type: boolean) - Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE - Lateral View Forward - Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: key (type: string), value (type: string) - outputColumnNames: key, value - Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Lateral View Forward + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: key (type: 
string), value (type: string) + outputColumnNames: key, value + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Lateral View Join Operator + outputColumnNames: _col0, _col1, _col7 + Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: string), _col1 (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE + Union + Statistics: Num rows: 1275 Data size: 13545 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: string), _col1 (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1275 Data size: 13545 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count(1) + keys: _col0 (type: string), _col1 (type: string) + mode: hash + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 1275 Data size: 13545 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: string), _col1 (type: string) + sort order: ++ + Map-reduce partition columns: _col0 (type: string), _col1 (type: string) + Statistics: Num rows: 1275 Data size: 13545 Basic stats: COMPLETE Column stats: NONE + value expressions: _col2 (type: bigint) + Select Operator + expressions: array(1,2,3) (type: array) + outputColumnNames: _col0 + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + UDTF Operator + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + function name: explode Lateral View Join Operator outputColumnNames: _col0, _col1, _col7 - Statistics: Num rows: 4000 Data size: 42496 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: string), _col1 (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 4000 Data size: 42496 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE Union - Statistics: Num rows: 4275 Data size: 45417 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1275 Data size: 13545 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: string), _col1 (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 4275 Data size: 45417 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1275 Data size: 13545 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: count(1) keys: _col0 (type: string), _col1 (type: string) mode: hash outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 4275 Data size: 45417 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1275 Data size: 13545 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string), _col1 (type: string) sort order: ++ Map-reduce partition columns: _col0 (type: string), _col1 (type: string) - Statistics: Num rows: 4275 Data size: 45417 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1275 Data size: 13545 Basic stats: COMPLETE Column stats: NONE value expressions: _col2 (type: bigint) - Select Operator - expressions: array(1,2,3) (type: array) - outputColumnNames: _col0 - Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE - UDTF Operator - Statistics: Num rows: 2000 Data size: 21248 Basic 
stats: COMPLETE Column stats: NONE - function name: explode - Lateral View Join Operator - outputColumnNames: _col0, _col1, _col7 - Statistics: Num rows: 4000 Data size: 42496 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: _col0 (type: string), _col1 (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 4000 Data size: 42496 Basic stats: COMPLETE Column stats: NONE - Union - Statistics: Num rows: 4275 Data size: 45417 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: _col0 (type: string), _col1 (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 4275 Data size: 45417 Basic stats: COMPLETE Column stats: NONE - Group By Operator - aggregations: count(1) - keys: _col0 (type: string), _col1 (type: string) - mode: hash - outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 4275 Data size: 45417 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: _col0 (type: string), _col1 (type: string) - sort order: ++ - Map-reduce partition columns: _col0 (type: string), _col1 (type: string) - Statistics: Num rows: 4275 Data size: 45417 Basic stats: COMPLETE Column stats: NONE - value expressions: _col2 (type: bigint) TableScan Union - Statistics: Num rows: 4275 Data size: 45417 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1275 Data size: 13545 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: string), _col1 (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 4275 Data size: 45417 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1275 Data size: 13545 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: count(1) keys: _col0 (type: string), _col1 (type: string) mode: hash outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 4275 Data size: 45417 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1275 Data size: 13545 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string), _col1 (type: string) sort order: ++ Map-reduce partition columns: _col0 (type: string), _col1 (type: string) - Statistics: Num rows: 4275 Data size: 45417 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1275 Data size: 13545 Basic stats: COMPLETE Column stats: NONE value expressions: _col2 (type: bigint) Reduce Operator Tree: Group By Operator @@ -195,14 +192,14 @@ keys: KEY._col0 (type: string), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 2137 Data size: 22703 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 637 Data size: 6767 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col2 (type: bigint), _col0 (type: string), _col1 (type: string) outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 2137 Data size: 22703 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 637 Data size: 6767 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 2137 Data size: 22703 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 637 Data size: 6767 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -238,8 +235,6 @@ PREHOOK: Input: default@srcpart PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 PREHOOK: Input: 
default@srcpart@ds=2008-04-08/hr=12 -PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 -PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 #### A masked pattern was here #### POSTHOOK: query: SELECT count(1) as counts, @@ -265,8 +260,6 @@ POSTHOOK: Input: default@srcpart POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 -POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 -POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 #### A masked pattern was here #### 10 100 val_100 10 103 val_103 @@ -601,8 +594,6 @@ PREHOOK: Input: default@srcpart PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 -PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 -PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 #### A masked pattern was here #### POSTHOOK: query: SELECT count(1) as counts, @@ -628,8 +619,6 @@ POSTHOOK: Input: default@srcpart POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 -POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 -POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 #### A masked pattern was here #### 10 100 val_100 10 103 val_103 @@ -964,8 +953,6 @@ PREHOOK: Input: default@srcpart PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 -PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 -PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 #### A masked pattern was here #### POSTHOOK: query: SELECT count(1) as counts, @@ -991,8 +978,6 @@ POSTHOOK: Input: default@srcpart POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 -POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 -POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 #### A masked pattern was here #### 10 100 val_100 10 103 val_103 Index: ql/src/test/results/clientpositive/union5.q.out =================================================================== --- ql/src/test/results/clientpositive/union5.q.out (revision 1637277) +++ ql/src/test/results/clientpositive/union5.q.out (working copy) @@ -102,14 +102,14 @@ keys: KEY._col0 (type: string) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: string), _col1 (type: bigint) outputColumnNames: _col0, _col1 - Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat Index: ql/src/test/results/clientpositive/union7.q.out =================================================================== --- ql/src/test/results/clientpositive/union7.q.out (revision 1637277) +++ ql/src/test/results/clientpositive/union7.q.out (working copy) @@ -107,14 +107,14 @@ keys: KEY._col0 (type: string) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 1 Data size: 96 
Basic stats: COMPLETE Column stats: PARTIAL Select Operator expressions: _col0 (type: string), _col1 (type: bigint) outputColumnNames: _col0, _col1 - Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: PARTIAL File Output Operator compressed: false - Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: PARTIAL table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat Index: ql/src/test/results/clientpositive/vector_aggregate_9.q.out =================================================================== --- ql/src/test/results/clientpositive/vector_aggregate_9.q.out (revision 0) +++ ql/src/test/results/clientpositive/vector_aggregate_9.q.out (working copy) @@ -0,0 +1,167 @@ +PREHOOK: query: create table vectortab2k( + t tinyint, + si smallint, + i int, + b bigint, + f float, + d double, + dc decimal(38,18), + bo boolean, + s string, + s2 string, + ts timestamp, + ts2 timestamp, + dt date) +ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' +STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@vectortab2k +POSTHOOK: query: create table vectortab2k( + t tinyint, + si smallint, + i int, + b bigint, + f float, + d double, + dc decimal(38,18), + bo boolean, + s string, + s2 string, + ts timestamp, + ts2 timestamp, + dt date) +ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' +STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@vectortab2k +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/vectortab2k' OVERWRITE INTO TABLE vectortab2k +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@vectortab2k +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/vectortab2k' OVERWRITE INTO TABLE vectortab2k +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@vectortab2k +PREHOOK: query: create table vectortab2korc( + t tinyint, + si smallint, + i int, + b bigint, + f float, + d double, + dc decimal(38,18), + bo boolean, + s string, + s2 string, + ts timestamp, + ts2 timestamp, + dt date) +STORED AS ORC +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@vectortab2korc +POSTHOOK: query: create table vectortab2korc( + t tinyint, + si smallint, + i int, + b bigint, + f float, + d double, + dc decimal(38,18), + bo boolean, + s string, + s2 string, + ts timestamp, + ts2 timestamp, + dt date) +STORED AS ORC +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@vectortab2korc +PREHOOK: query: INSERT INTO TABLE vectortab2korc SELECT * FROM vectortab2k +PREHOOK: type: QUERY +PREHOOK: Input: default@vectortab2k +PREHOOK: Output: default@vectortab2korc +POSTHOOK: query: INSERT INTO TABLE vectortab2korc SELECT * FROM vectortab2k +POSTHOOK: type: QUERY +POSTHOOK: Input: default@vectortab2k +POSTHOOK: Output: default@vectortab2korc +POSTHOOK: Lineage: vectortab2korc.b SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:b, type:bigint, comment:null), ] +POSTHOOK: Lineage: vectortab2korc.bo SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:bo, type:boolean, comment:null), ] +POSTHOOK: Lineage: vectortab2korc.d SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:d, type:double, comment:null), 
] +POSTHOOK: Lineage: vectortab2korc.dc SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:dc, type:decimal(38,18), comment:null), ] +POSTHOOK: Lineage: vectortab2korc.dt SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:dt, type:date, comment:null), ] +POSTHOOK: Lineage: vectortab2korc.f SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:f, type:float, comment:null), ] +POSTHOOK: Lineage: vectortab2korc.i SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:i, type:int, comment:null), ] +POSTHOOK: Lineage: vectortab2korc.s SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:s, type:string, comment:null), ] +POSTHOOK: Lineage: vectortab2korc.s2 SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:s2, type:string, comment:null), ] +POSTHOOK: Lineage: vectortab2korc.si SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:si, type:smallint, comment:null), ] +POSTHOOK: Lineage: vectortab2korc.t SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:t, type:tinyint, comment:null), ] +POSTHOOK: Lineage: vectortab2korc.ts SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:ts, type:timestamp, comment:null), ] +POSTHOOK: Lineage: vectortab2korc.ts2 SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:ts2, type:timestamp, comment:null), ] +PREHOOK: query: explain +select min(dc), max(dc), sum(dc), avg(dc) from vectortab2korc +PREHOOK: type: QUERY +POSTHOOK: query: explain +select min(dc), max(dc), sum(dc), avg(dc) from vectortab2korc +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: vectortab2korc + Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: dc (type: decimal(38,18)) + outputColumnNames: dc + Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: min(dc), max(dc), sum(dc), avg(dc) + mode: hash + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 1 Data size: 336 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 336 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: decimal(38,18)), _col1 (type: decimal(38,18)), _col2 (type: decimal(38,18)), _col3 (type: struct) + Execution mode: vectorized + Reduce Operator Tree: + Group By Operator + aggregations: min(VALUE._col0), max(VALUE._col1), sum(VALUE._col2), avg(VALUE._col3) + mode: mergepartial + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 1 Data size: 448 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: decimal(38,18)), _col1 (type: decimal(38,18)), _col2 (type: decimal(38,18)), _col3 (type: decimal(38,18)) + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 1 Data size: 448 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 448 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select min(dc), max(dc), sum(dc), avg(dc) from vectortab2korc +PREHOOK: type: QUERY +PREHOOK: Input: default@vectortab2korc +#### A masked pattern was here #### +POSTHOOK: 
query: select min(dc), max(dc), sum(dc), avg(dc) from vectortab2korc +POSTHOOK: type: QUERY +POSTHOOK: Input: default@vectortab2korc +#### A masked pattern was here #### +-4997414117561.546875 4994550248722.298828 -10252745435816.024410 -5399023399.587163986308583465 Index: ql/src/test/results/clientpositive/vector_between_in.q.out =================================================================== --- ql/src/test/results/clientpositive/vector_between_in.q.out (revision 1637277) +++ ql/src/test/results/clientpositive/vector_between_in.q.out (working copy) @@ -662,15 +662,15 @@ 14.9324324324 19.1135135135 20.3081081081 -22.1000000000 +22.1 24.4891891892 33.4486486486 34.6432432432 40.0189189189 42.4081081081 43.0054054054 -44.2000000000 -44.2000000000 +44.2 +44.2 44.7972972973 45.9918918919 PREHOOK: query: SELECT COUNT(*) FROM decimal_date_test WHERE cdecimal1 NOT BETWEEN -2000 AND 4390.1351351351 Index: ql/src/test/results/clientpositive/vector_decimal_1.q.out =================================================================== --- ql/src/test/results/clientpositive/vector_decimal_1.q.out (revision 0) +++ ql/src/test/results/clientpositive/vector_decimal_1.q.out (working copy) @@ -0,0 +1,528 @@ +PREHOOK: query: drop table if exists decimal_1 +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table if exists decimal_1 +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table decimal_1 (t decimal(4,2), u decimal(5), v decimal) stored as orc +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@decimal_1 +POSTHOOK: query: create table decimal_1 (t decimal(4,2), u decimal(5), v decimal) stored as orc +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@decimal_1 +PREHOOK: query: desc decimal_1 +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@decimal_1 +POSTHOOK: query: desc decimal_1 +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@decimal_1 +t decimal(4,2) +u decimal(5,0) +v decimal(10,0) +PREHOOK: query: insert overwrite table decimal_1 + select cast('17.29' as decimal(4,2)), 3.1415926BD, 3115926.54321BD from src tablesample (1 rows) +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@decimal_1 +POSTHOOK: query: insert overwrite table decimal_1 + select cast('17.29' as decimal(4,2)), 3.1415926BD, 3115926.54321BD from src tablesample (1 rows) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@decimal_1 +POSTHOOK: Lineage: decimal_1.t EXPRESSION [] +POSTHOOK: Lineage: decimal_1.u EXPRESSION [] +POSTHOOK: Lineage: decimal_1.v EXPRESSION [] +PREHOOK: query: explain +select cast(t as boolean) from decimal_1 order by t +PREHOOK: type: QUERY +POSTHOOK: query: explain +select cast(t as boolean) from decimal_1 order by t +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: decimal_1 + Statistics: Num rows: 1 Data size: 336 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: UDFToBoolean(t) (type: boolean) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 336 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: boolean) + sort order: + + Statistics: Num rows: 1 Data size: 336 Basic stats: COMPLETE Column stats: NONE + Execution mode: vectorized + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: boolean) + outputColumnNames: 
_col0 + Statistics: Num rows: 1 Data size: 336 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 336 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select cast(t as boolean) from decimal_1 order by t +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_1 +#### A masked pattern was here #### +POSTHOOK: query: select cast(t as boolean) from decimal_1 order by t +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_1 +#### A masked pattern was here #### +true +PREHOOK: query: explain +select cast(t as tinyint) from decimal_1 order by t +PREHOOK: type: QUERY +POSTHOOK: query: explain +select cast(t as tinyint) from decimal_1 order by t +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: decimal_1 + Statistics: Num rows: 1 Data size: 336 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: UDFToByte(t) (type: tinyint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 336 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: tinyint) + sort order: + + Statistics: Num rows: 1 Data size: 336 Basic stats: COMPLETE Column stats: NONE + Execution mode: vectorized + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: tinyint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 336 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 336 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select cast(t as tinyint) from decimal_1 order by t +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_1 +#### A masked pattern was here #### +POSTHOOK: query: select cast(t as tinyint) from decimal_1 order by t +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_1 +#### A masked pattern was here #### +17 +PREHOOK: query: explain +select cast(t as smallint) from decimal_1 order by t +PREHOOK: type: QUERY +POSTHOOK: query: explain +select cast(t as smallint) from decimal_1 order by t +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: decimal_1 + Statistics: Num rows: 1 Data size: 336 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: UDFToShort(t) (type: smallint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 336 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: smallint) + sort order: + + Statistics: Num rows: 1 Data size: 336 Basic stats: COMPLETE Column stats: NONE + Execution mode: vectorized + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: smallint) + outputColumnNames: _col0 
+ Statistics: Num rows: 1 Data size: 336 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 336 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select cast(t as smallint) from decimal_1 order by t +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_1 +#### A masked pattern was here #### +POSTHOOK: query: select cast(t as smallint) from decimal_1 order by t +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_1 +#### A masked pattern was here #### +17 +PREHOOK: query: explain +select cast(t as int) from decimal_1 order by t +PREHOOK: type: QUERY +POSTHOOK: query: explain +select cast(t as int) from decimal_1 order by t +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: decimal_1 + Statistics: Num rows: 1 Data size: 336 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: UDFToInteger(t) (type: int) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 336 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: int) + sort order: + + Statistics: Num rows: 1 Data size: 336 Basic stats: COMPLETE Column stats: NONE + Execution mode: vectorized + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: int) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 336 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 336 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select cast(t as int) from decimal_1 order by t +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_1 +#### A masked pattern was here #### +POSTHOOK: query: select cast(t as int) from decimal_1 order by t +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_1 +#### A masked pattern was here #### +17 +PREHOOK: query: explain +select cast(t as bigint) from decimal_1 order by t +PREHOOK: type: QUERY +POSTHOOK: query: explain +select cast(t as bigint) from decimal_1 order by t +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: decimal_1 + Statistics: Num rows: 1 Data size: 336 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: UDFToLong(t) (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 336 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: bigint) + sort order: + + Statistics: Num rows: 1 Data size: 336 Basic stats: COMPLETE Column stats: NONE + Execution mode: vectorized + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 336 
Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 336 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select cast(t as bigint) from decimal_1 order by t +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_1 +#### A masked pattern was here #### +POSTHOOK: query: select cast(t as bigint) from decimal_1 order by t +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_1 +#### A masked pattern was here #### +17 +PREHOOK: query: explain +select cast(t as float) from decimal_1 order by t +PREHOOK: type: QUERY +POSTHOOK: query: explain +select cast(t as float) from decimal_1 order by t +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: decimal_1 + Statistics: Num rows: 1 Data size: 336 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: UDFToFloat(t) (type: float) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 336 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: float) + sort order: + + Statistics: Num rows: 1 Data size: 336 Basic stats: COMPLETE Column stats: NONE + Execution mode: vectorized + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: float) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 336 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 336 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select cast(t as float) from decimal_1 order by t +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_1 +#### A masked pattern was here #### +POSTHOOK: query: select cast(t as float) from decimal_1 order by t +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_1 +#### A masked pattern was here #### +17.29 +PREHOOK: query: explain +select cast(t as double) from decimal_1 order by t +PREHOOK: type: QUERY +POSTHOOK: query: explain +select cast(t as double) from decimal_1 order by t +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: decimal_1 + Statistics: Num rows: 1 Data size: 336 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: UDFToDouble(t) (type: double) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 336 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: double) + sort order: + + Statistics: Num rows: 1 Data size: 336 Basic stats: COMPLETE Column stats: NONE + Execution mode: vectorized + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: double) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 336 Basic stats: COMPLETE Column 
stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 336 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select cast(t as double) from decimal_1 order by t +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_1 +#### A masked pattern was here #### +POSTHOOK: query: select cast(t as double) from decimal_1 order by t +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_1 +#### A masked pattern was here #### +17.29 +PREHOOK: query: explain +select cast(t as string) from decimal_1 order by t +PREHOOK: type: QUERY +POSTHOOK: query: explain +select cast(t as string) from decimal_1 order by t +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: decimal_1 + Statistics: Num rows: 1 Data size: 336 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: UDFToString(t) (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 336 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: string) + sort order: + + Statistics: Num rows: 1 Data size: 336 Basic stats: COMPLETE Column stats: NONE + Execution mode: vectorized + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 336 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 336 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select cast(t as string) from decimal_1 order by t +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_1 +#### A masked pattern was here #### +POSTHOOK: query: select cast(t as string) from decimal_1 order by t +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_1 +#### A masked pattern was here #### +17.29 +PREHOOK: query: explain +select cast(t as timestamp) from decimal_1 order by t +PREHOOK: type: QUERY +POSTHOOK: query: explain +select cast(t as timestamp) from decimal_1 order by t +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: decimal_1 + Statistics: Num rows: 1 Data size: 336 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: CAST( t AS TIMESTAMP) (type: timestamp) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 336 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: timestamp) + sort order: + + Statistics: Num rows: 1 Data size: 336 Basic stats: COMPLETE Column stats: NONE + Execution mode: vectorized + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: timestamp) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 336 Basic stats: COMPLETE 
Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 336 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select cast(t as timestamp) from decimal_1 order by t +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_1 +#### A masked pattern was here #### +POSTHOOK: query: select cast(t as timestamp) from decimal_1 order by t +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_1 +#### A masked pattern was here #### +1969-12-31 16:00:17.29 +PREHOOK: query: drop table decimal_1 +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@decimal_1 +PREHOOK: Output: default@decimal_1 +POSTHOOK: query: drop table decimal_1 +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@decimal_1 +POSTHOOK: Output: default@decimal_1 Index: ql/src/test/results/clientpositive/vector_decimal_10_0.q.out =================================================================== --- ql/src/test/results/clientpositive/vector_decimal_10_0.q.out (revision 0) +++ ql/src/test/results/clientpositive/vector_decimal_10_0.q.out (working copy) @@ -0,0 +1,105 @@ +PREHOOK: query: DROP TABLE IF EXISTS decimal_txt +PREHOOK: type: DROPTABLE +POSTHOOK: query: DROP TABLE IF EXISTS decimal_txt +POSTHOOK: type: DROPTABLE +PREHOOK: query: DROP TABLE IF EXISTS decimal +PREHOOK: type: DROPTABLE +POSTHOOK: query: DROP TABLE IF EXISTS decimal +POSTHOOK: type: DROPTABLE +PREHOOK: query: CREATE TABLE decimal_txt (dec decimal) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@decimal_txt +POSTHOOK: query: CREATE TABLE decimal_txt (dec decimal) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@decimal_txt +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/decimal_10_0.txt' OVERWRITE INTO TABLE decimal_txt +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@decimal_txt +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/decimal_10_0.txt' OVERWRITE INTO TABLE decimal_txt +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@decimal_txt +PREHOOK: query: CREATE TABLE DECIMAL STORED AS ORC AS SELECT * FROM decimal_txt +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: default@decimal_txt +PREHOOK: Output: database:default +PREHOOK: Output: default@DECIMAL +POSTHOOK: query: CREATE TABLE DECIMAL STORED AS ORC AS SELECT * FROM decimal_txt +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: default@decimal_txt +POSTHOOK: Output: database:default +POSTHOOK: Output: default@DECIMAL +PREHOOK: query: EXPLAIN +SELECT dec FROM DECIMAL order by dec +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN +SELECT dec FROM DECIMAL order by dec +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: decimal + Statistics: Num rows: 2 Data size: 112 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: dec (type: decimal(10,0)) + outputColumnNames: _col0 + Statistics: Num rows: 2 Data size: 112 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: decimal(10,0)) + sort order: + + 
Statistics: Num rows: 2 Data size: 112 Basic stats: COMPLETE Column stats: NONE + Execution mode: vectorized + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: decimal(10,0)) + outputColumnNames: _col0 + Statistics: Num rows: 2 Data size: 112 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 2 Data size: 112 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT dec FROM DECIMAL order by dec +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal +#### A masked pattern was here #### +POSTHOOK: query: SELECT dec FROM DECIMAL order by dec +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal +#### A masked pattern was here #### +NULL +1000000000 +PREHOOK: query: DROP TABLE DECIMAL_txt +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@decimal_txt +PREHOOK: Output: default@decimal_txt +POSTHOOK: query: DROP TABLE DECIMAL_txt +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@decimal_txt +POSTHOOK: Output: default@decimal_txt +PREHOOK: query: DROP TABLE DECIMAL +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@decimal +PREHOOK: Output: default@decimal +POSTHOOK: query: DROP TABLE DECIMAL +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@decimal +POSTHOOK: Output: default@decimal Index: ql/src/test/results/clientpositive/vector_decimal_2.q.out =================================================================== --- ql/src/test/results/clientpositive/vector_decimal_2.q.out (revision 0) +++ ql/src/test/results/clientpositive/vector_decimal_2.q.out (working copy) @@ -0,0 +1,1487 @@ +PREHOOK: query: drop table decimal_2 +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table decimal_2 +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table decimal_2 (t decimal(18,9)) stored as orc +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@decimal_2 +POSTHOOK: query: create table decimal_2 (t decimal(18,9)) stored as orc +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@decimal_2 +PREHOOK: query: insert overwrite table decimal_2 + select cast('17.29' as decimal(4,2)) from src tablesample (1 rows) +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@decimal_2 +POSTHOOK: query: insert overwrite table decimal_2 + select cast('17.29' as decimal(4,2)) from src tablesample (1 rows) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@decimal_2 +POSTHOOK: Lineage: decimal_2.t EXPRESSION [] +PREHOOK: query: explain +select cast(t as boolean) from decimal_2 order by t +PREHOOK: type: QUERY +POSTHOOK: query: explain +select cast(t as boolean) from decimal_2 order by t +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: decimal_2 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: UDFToBoolean(t) (type: boolean) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: boolean) + sort order: + + 
Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + Execution mode: vectorized + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: boolean) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select cast(t as boolean) from decimal_2 order by t +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_2 +#### A masked pattern was here #### +POSTHOOK: query: select cast(t as boolean) from decimal_2 order by t +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_2 +#### A masked pattern was here #### +true +PREHOOK: query: explain +select cast(t as tinyint) from decimal_2 order by t +PREHOOK: type: QUERY +POSTHOOK: query: explain +select cast(t as tinyint) from decimal_2 order by t +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: decimal_2 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: UDFToByte(t) (type: tinyint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: tinyint) + sort order: + + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + Execution mode: vectorized + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: tinyint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select cast(t as tinyint) from decimal_2 order by t +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_2 +#### A masked pattern was here #### +POSTHOOK: query: select cast(t as tinyint) from decimal_2 order by t +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_2 +#### A masked pattern was here #### +17 +PREHOOK: query: explain +select cast(t as smallint) from decimal_2 order by t +PREHOOK: type: QUERY +POSTHOOK: query: explain +select cast(t as smallint) from decimal_2 order by t +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: decimal_2 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: UDFToShort(t) (type: smallint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: smallint) + sort order: + + 
Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + Execution mode: vectorized + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: smallint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select cast(t as smallint) from decimal_2 order by t +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_2 +#### A masked pattern was here #### +POSTHOOK: query: select cast(t as smallint) from decimal_2 order by t +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_2 +#### A masked pattern was here #### +17 +PREHOOK: query: explain +select cast(t as int) from decimal_2 order by t +PREHOOK: type: QUERY +POSTHOOK: query: explain +select cast(t as int) from decimal_2 order by t +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: decimal_2 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: UDFToInteger(t) (type: int) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: int) + sort order: + + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + Execution mode: vectorized + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: int) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select cast(t as int) from decimal_2 order by t +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_2 +#### A masked pattern was here #### +POSTHOOK: query: select cast(t as int) from decimal_2 order by t +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_2 +#### A masked pattern was here #### +17 +PREHOOK: query: explain +select cast(t as bigint) from decimal_2 order by t +PREHOOK: type: QUERY +POSTHOOK: query: explain +select cast(t as bigint) from decimal_2 order by t +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: decimal_2 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: UDFToLong(t) (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: bigint) + sort order: + + Statistics: Num rows: 1 Data size: 112 
Basic stats: COMPLETE Column stats: NONE + Execution mode: vectorized + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select cast(t as bigint) from decimal_2 order by t +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_2 +#### A masked pattern was here #### +POSTHOOK: query: select cast(t as bigint) from decimal_2 order by t +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_2 +#### A masked pattern was here #### +17 +PREHOOK: query: explain +select cast(t as float) from decimal_2 order by t +PREHOOK: type: QUERY +POSTHOOK: query: explain +select cast(t as float) from decimal_2 order by t +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: decimal_2 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: UDFToFloat(t) (type: float) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: float) + sort order: + + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + Execution mode: vectorized + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: float) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select cast(t as float) from decimal_2 order by t +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_2 +#### A masked pattern was here #### +POSTHOOK: query: select cast(t as float) from decimal_2 order by t +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_2 +#### A masked pattern was here #### +17.29 +PREHOOK: query: explain +select cast(t as double) from decimal_2 order by t +PREHOOK: type: QUERY +POSTHOOK: query: explain +select cast(t as double) from decimal_2 order by t +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: decimal_2 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: UDFToDouble(t) (type: double) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: double) + sort order: + + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column 
stats: NONE + Execution mode: vectorized + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: double) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select cast(t as double) from decimal_2 order by t +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_2 +#### A masked pattern was here #### +POSTHOOK: query: select cast(t as double) from decimal_2 order by t +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_2 +#### A masked pattern was here #### +17.29 +PREHOOK: query: explain +select cast(t as string) from decimal_2 order by t +PREHOOK: type: QUERY +POSTHOOK: query: explain +select cast(t as string) from decimal_2 order by t +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: decimal_2 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: UDFToString(t) (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: string) + sort order: + + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + Execution mode: vectorized + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select cast(t as string) from decimal_2 order by t +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_2 +#### A masked pattern was here #### +POSTHOOK: query: select cast(t as string) from decimal_2 order by t +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_2 +#### A masked pattern was here #### +17.29 +PREHOOK: query: insert overwrite table decimal_2 + select cast('3404045.5044003' as decimal(18,9)) from src tablesample (1 rows) +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@decimal_2 +POSTHOOK: query: insert overwrite table decimal_2 + select cast('3404045.5044003' as decimal(18,9)) from src tablesample (1 rows) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@decimal_2 +POSTHOOK: Lineage: decimal_2.t EXPRESSION [] +PREHOOK: query: explain +select cast(t as boolean) from decimal_2 order by t +PREHOOK: type: QUERY +POSTHOOK: query: explain +select cast(t as boolean) from decimal_2 order by t +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + 
Map Reduce + Map Operator Tree: + TableScan + alias: decimal_2 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: UDFToBoolean(t) (type: boolean) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: boolean) + sort order: + + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + Execution mode: vectorized + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: boolean) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select cast(t as boolean) from decimal_2 order by t +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_2 +#### A masked pattern was here #### +POSTHOOK: query: select cast(t as boolean) from decimal_2 order by t +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_2 +#### A masked pattern was here #### +true +PREHOOK: query: explain +select cast(t as tinyint) from decimal_2 order by t +PREHOOK: type: QUERY +POSTHOOK: query: explain +select cast(t as tinyint) from decimal_2 order by t +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: decimal_2 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: UDFToByte(t) (type: tinyint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: tinyint) + sort order: + + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + Execution mode: vectorized + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: tinyint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select cast(t as tinyint) from decimal_2 order by t +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_2 +#### A masked pattern was here #### +POSTHOOK: query: select cast(t as tinyint) from decimal_2 order by t +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_2 +#### A masked pattern was here #### +13 +PREHOOK: query: explain +select cast(t as smallint) from decimal_2 order by t +PREHOOK: type: QUERY +POSTHOOK: query: explain +select cast(t as smallint) from decimal_2 order by t +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map 
Reduce + Map Operator Tree: + TableScan + alias: decimal_2 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: UDFToShort(t) (type: smallint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: smallint) + sort order: + + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + Execution mode: vectorized + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: smallint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select cast(t as smallint) from decimal_2 order by t +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_2 +#### A masked pattern was here #### +POSTHOOK: query: select cast(t as smallint) from decimal_2 order by t +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_2 +#### A masked pattern was here #### +-3827 +PREHOOK: query: explain +select cast(t as int) from decimal_2 order by t +PREHOOK: type: QUERY +POSTHOOK: query: explain +select cast(t as int) from decimal_2 order by t +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: decimal_2 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: UDFToInteger(t) (type: int) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: int) + sort order: + + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + Execution mode: vectorized + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: int) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select cast(t as int) from decimal_2 order by t +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_2 +#### A masked pattern was here #### +POSTHOOK: query: select cast(t as int) from decimal_2 order by t +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_2 +#### A masked pattern was here #### +3404045 +PREHOOK: query: explain +select cast(t as bigint) from decimal_2 order by t +PREHOOK: type: QUERY +POSTHOOK: query: explain +select cast(t as bigint) from decimal_2 order by t +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: 
+ TableScan + alias: decimal_2 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: UDFToLong(t) (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: bigint) + sort order: + + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + Execution mode: vectorized + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select cast(t as bigint) from decimal_2 order by t +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_2 +#### A masked pattern was here #### +POSTHOOK: query: select cast(t as bigint) from decimal_2 order by t +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_2 +#### A masked pattern was here #### +3404045 +PREHOOK: query: explain +select cast(t as float) from decimal_2 order by t +PREHOOK: type: QUERY +POSTHOOK: query: explain +select cast(t as float) from decimal_2 order by t +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: decimal_2 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: UDFToFloat(t) (type: float) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: float) + sort order: + + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + Execution mode: vectorized + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: float) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select cast(t as float) from decimal_2 order by t +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_2 +#### A masked pattern was here #### +POSTHOOK: query: select cast(t as float) from decimal_2 order by t +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_2 +#### A masked pattern was here #### +3404045.5 +PREHOOK: query: explain +select cast(t as double) from decimal_2 order by t +PREHOOK: type: QUERY +POSTHOOK: query: explain +select cast(t as double) from decimal_2 order by t +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: 
decimal_2 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: UDFToDouble(t) (type: double) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: double) + sort order: + + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + Execution mode: vectorized + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: double) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select cast(t as double) from decimal_2 order by t +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_2 +#### A masked pattern was here #### +POSTHOOK: query: select cast(t as double) from decimal_2 order by t +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_2 +#### A masked pattern was here #### +3404045.5044003 +PREHOOK: query: explain +select cast(t as string) from decimal_2 order by t +PREHOOK: type: QUERY +POSTHOOK: query: explain +select cast(t as string) from decimal_2 order by t +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: decimal_2 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: UDFToString(t) (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: string) + sort order: + + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + Execution mode: vectorized + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select cast(t as string) from decimal_2 order by t +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_2 +#### A masked pattern was here #### +POSTHOOK: query: select cast(t as string) from decimal_2 order by t +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_2 +#### A masked pattern was here #### +3404045.5044003 +PREHOOK: query: explain +select cast(3.14 as decimal(4,2)) as c from decimal_2 order by c +PREHOOK: type: QUERY +POSTHOOK: query: explain +select cast(3.14 as decimal(4,2)) as c from decimal_2 order by c +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map 
Operator Tree: + TableScan + alias: decimal_2 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: CAST( 3.14 AS decimal(4,2)) (type: decimal(4,2)) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key expressions: _col0 (type: decimal(4,2)) + sort order: + + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE + Execution mode: vectorized + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: decimal(4,2)) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select cast(3.14 as decimal(4,2)) as c from decimal_2 order by c +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_2 +#### A masked pattern was here #### +POSTHOOK: query: select cast(3.14 as decimal(4,2)) as c from decimal_2 order by c +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_2 +#### A masked pattern was here #### +3.14 +PREHOOK: query: explain +select cast(cast(3.14 as float) as decimal(4,2)) as c from decimal_2 order by c +PREHOOK: type: QUERY +POSTHOOK: query: explain +select cast(cast(3.14 as float) as decimal(4,2)) as c from decimal_2 order by c +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: decimal_2 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: CAST( 3.14 AS decimal(4,2)) (type: decimal(4,2)) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key expressions: _col0 (type: decimal(4,2)) + sort order: + + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE + Execution mode: vectorized + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: decimal(4,2)) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select cast(cast(3.14 as float) as decimal(4,2)) as c from decimal_2 order by c +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_2 +#### A masked pattern was here #### +POSTHOOK: query: select cast(cast(3.14 as float) as decimal(4,2)) as c from decimal_2 order by c +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_2 +#### A masked pattern was here #### +3.14 +PREHOOK: query: explain +select cast(cast('2012-12-19 11:12:19.1234567' as timestamp) as decimal(30,8)) as c 
from decimal_2 order by c +PREHOOK: type: QUERY +POSTHOOK: query: explain +select cast(cast('2012-12-19 11:12:19.1234567' as timestamp) as decimal(30,8)) as c from decimal_2 order by c +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: decimal_2 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: CAST( 2012-12-19 11:12:19.1234567 AS decimal(30,8)) (type: decimal(30,8)) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key expressions: _col0 (type: decimal(30,8)) + sort order: + + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: decimal(30,8)) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select cast(cast('2012-12-19 11:12:19.1234567' as timestamp) as decimal(30,8)) as c from decimal_2 order by c +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_2 +#### A masked pattern was here #### +POSTHOOK: query: select cast(cast('2012-12-19 11:12:19.1234567' as timestamp) as decimal(30,8)) as c from decimal_2 order by c +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_2 +#### A masked pattern was here #### +1355944339.1234567 +PREHOOK: query: explain +select cast(true as decimal) as c from decimal_2 order by c +PREHOOK: type: QUERY +POSTHOOK: query: explain +select cast(true as decimal) as c from decimal_2 order by c +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: decimal_2 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: CAST( true AS decimal(10,0)) (type: decimal(10,0)) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key expressions: _col0 (type: decimal(10,0)) + sort order: + + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: decimal(10,0)) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: explain +select cast(true as decimal) as c from decimal_2 order by c +PREHOOK: type: QUERY +POSTHOOK: 
query: explain +select cast(true as decimal) as c from decimal_2 order by c +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: decimal_2 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: CAST( true AS decimal(10,0)) (type: decimal(10,0)) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key expressions: _col0 (type: decimal(10,0)) + sort order: + + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: decimal(10,0)) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select cast(true as decimal) as c from decimal_2 order by c +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_2 +#### A masked pattern was here #### +POSTHOOK: query: select cast(true as decimal) as c from decimal_2 order by c +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_2 +#### A masked pattern was here #### +1 +PREHOOK: query: explain +select cast(3Y as decimal) as c from decimal_2 order by c +PREHOOK: type: QUERY +POSTHOOK: query: explain +select cast(3Y as decimal) as c from decimal_2 order by c +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: decimal_2 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: CAST( 3 AS decimal(10,0)) (type: decimal(10,0)) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key expressions: _col0 (type: decimal(10,0)) + sort order: + + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE + Execution mode: vectorized + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: decimal(10,0)) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select cast(3Y as decimal) as c from decimal_2 order by c +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_2 +#### A masked pattern was here #### +POSTHOOK: query: select cast(3Y as decimal) as c from decimal_2 order by c +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_2 +#### A masked pattern was here #### +3 
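The cast results asserted above — 3.14 surviving a cast to decimal(4,2), cast(true as decimal) yielding 1, and 0.99999999999999999999 rounding up to 1.0 at decimal(20,19) later in this file — all follow the same rule: round the value to the target scale, and produce NULL if the integer part then exceeds what the target precision allows. A minimal sketch of that rule using only java.math.BigDecimal; the enforce helper and the HALF_UP rounding mode are illustrative assumptions for reproducing these golden rows, not Hive's actual implementation:

import java.math.BigDecimal;
import java.math.RoundingMode;

public class DecimalCastSketch {
    // Hypothetical helper: round v to `scale` digits, then return null if the
    // digits left of the decimal point no longer fit in (precision - scale).
    static BigDecimal enforce(BigDecimal v, int precision, int scale) {
        BigDecimal rounded = v.setScale(scale, RoundingMode.HALF_UP); // assumed rounding mode
        if (rounded.precision() - rounded.scale() > precision - scale) {
            return null; // overflow of the integer part -> NULL in the result rows
        }
        return rounded;
    }

    public static void main(String[] args) {
        // 3.14 fits decimal(4,2) unchanged
        System.out.println(enforce(new BigDecimal("3.14"), 4, 2));
        // 20 fractional digits rounded to 19 carries up to 1.0000000000000000000
        System.out.println(enforce(new BigDecimal("0.99999999999999999999"), 20, 19));
    }
}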
+PREHOOK: query: explain +select cast(3S as decimal) as c from decimal_2 order by c +PREHOOK: type: QUERY +POSTHOOK: query: explain +select cast(3S as decimal) as c from decimal_2 order by c +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: decimal_2 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: CAST( 3 AS decimal(10,0)) (type: decimal(10,0)) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key expressions: _col0 (type: decimal(10,0)) + sort order: + + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE + Execution mode: vectorized + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: decimal(10,0)) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select cast(3S as decimal) as c from decimal_2 order by c +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_2 +#### A masked pattern was here #### +POSTHOOK: query: select cast(3S as decimal) as c from decimal_2 order by c +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_2 +#### A masked pattern was here #### +3 +PREHOOK: query: explain +select cast(cast(3 as int) as decimal) as c from decimal_2 order by c +PREHOOK: type: QUERY +POSTHOOK: query: explain +select cast(cast(3 as int) as decimal) as c from decimal_2 order by c +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: decimal_2 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: CAST( 3 AS decimal(10,0)) (type: decimal(10,0)) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key expressions: _col0 (type: decimal(10,0)) + sort order: + + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE + Execution mode: vectorized + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: decimal(10,0)) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select cast(cast(3 as int) as decimal) as c from decimal_2 order by c +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_2 +#### A masked pattern was here #### 
+POSTHOOK: query: select cast(cast(3 as int) as decimal) as c from decimal_2 order by c +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_2 +#### A masked pattern was here #### +3 +PREHOOK: query: explain +select cast(3L as decimal) as c from decimal_2 order by c +PREHOOK: type: QUERY +POSTHOOK: query: explain +select cast(3L as decimal) as c from decimal_2 order by c +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: decimal_2 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: CAST( 3 AS decimal(10,0)) (type: decimal(10,0)) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key expressions: _col0 (type: decimal(10,0)) + sort order: + + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE + Execution mode: vectorized + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: decimal(10,0)) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select cast(3L as decimal) as c from decimal_2 order by c +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_2 +#### A masked pattern was here #### +POSTHOOK: query: select cast(3L as decimal) as c from decimal_2 order by c +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_2 +#### A masked pattern was here #### +3 +PREHOOK: query: explain +select cast(0.99999999999999999999 as decimal(20,19)) as c from decimal_2 order by c +PREHOOK: type: QUERY +POSTHOOK: query: explain +select cast(0.99999999999999999999 as decimal(20,19)) as c from decimal_2 order by c +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: decimal_2 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: CAST( 1.0 AS decimal(20,19)) (type: decimal(20,19)) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key expressions: _col0 (type: decimal(20,19)) + sort order: + + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE + Execution mode: vectorized + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: decimal(20,19)) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + 
limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select cast(0.99999999999999999999 as decimal(20,19)) as c from decimal_2 order by c +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_2 +#### A masked pattern was here #### +POSTHOOK: query: select cast(0.99999999999999999999 as decimal(20,19)) as c from decimal_2 order by c +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_2 +#### A masked pattern was here #### +1.0 +PREHOOK: query: explain +select cast('0.99999999999999999999' as decimal(20,20)) as c from decimal_2 order by c +PREHOOK: type: QUERY +POSTHOOK: query: explain +select cast('0.99999999999999999999' as decimal(20,20)) as c from decimal_2 order by c +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: decimal_2 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: CAST( '0.99999999999999999999' AS decimal(20,20)) (type: decimal(20,20)) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key expressions: _col0 (type: decimal(20,20)) + sort order: + + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: decimal(20,20)) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select cast('0.99999999999999999999' as decimal(20,20)) as c from decimal_2 order by c +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_2 +#### A masked pattern was here #### +POSTHOOK: query: select cast('0.99999999999999999999' as decimal(20,20)) as c from decimal_2 order by c +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_2 +#### A masked pattern was here #### +0.99999999999999999999 +PREHOOK: query: drop table decimal_2 +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@decimal_2 +PREHOOK: Output: default@decimal_2 +POSTHOOK: query: drop table decimal_2 +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@decimal_2 +POSTHOOK: Output: default@decimal_2 Index: ql/src/test/results/clientpositive/vector_decimal_3.q.out =================================================================== --- ql/src/test/results/clientpositive/vector_decimal_3.q.out (revision 0) +++ ql/src/test/results/clientpositive/vector_decimal_3.q.out (working copy) @@ -0,0 +1,374 @@ +PREHOOK: query: DROP TABLE IF EXISTS DECIMAL_3_txt +PREHOOK: type: DROPTABLE +POSTHOOK: query: DROP TABLE IF EXISTS DECIMAL_3_txt +POSTHOOK: type: DROPTABLE +PREHOOK: query: DROP TABLE IF EXISTS DECIMAL_3 +PREHOOK: type: DROPTABLE +POSTHOOK: query: DROP TABLE IF EXISTS DECIMAL_3 +POSTHOOK: type: DROPTABLE +PREHOOK: query: CREATE TABLE DECIMAL_3_txt(key decimal(38,18), value int) +ROW FORMAT DELIMITED + FIELDS TERMINATED BY ' ' +STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@DECIMAL_3_txt 
+POSTHOOK: query: CREATE TABLE DECIMAL_3_txt(key decimal(38,18), value int) +ROW FORMAT DELIMITED + FIELDS TERMINATED BY ' ' +STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@DECIMAL_3_txt +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv7.txt' INTO TABLE DECIMAL_3_txt +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@decimal_3_txt +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv7.txt' INTO TABLE DECIMAL_3_txt +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@decimal_3_txt +PREHOOK: query: CREATE TABLE DECIMAL_3 STORED AS ORC AS SELECT * FROM DECIMAL_3_txt +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: default@decimal_3_txt +PREHOOK: Output: database:default +PREHOOK: Output: default@DECIMAL_3 +POSTHOOK: query: CREATE TABLE DECIMAL_3 STORED AS ORC AS SELECT * FROM DECIMAL_3_txt +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: default@decimal_3_txt +POSTHOOK: Output: database:default +POSTHOOK: Output: default@DECIMAL_3 +PREHOOK: query: SELECT * FROM DECIMAL_3 ORDER BY key, value +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_3 +#### A masked pattern was here #### +POSTHOOK: query: SELECT * FROM DECIMAL_3 ORDER BY key, value +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_3 +#### A masked pattern was here #### +NULL 0 +-1234567890.1234567890 -1234567890 +-4400 4400 +-1255.49 -1255 +-1.122 -11 +-1.12 -1 +-1.12 -1 +-0.333 0 +-0.33 0 +-0.3 0 +0.000000000000000000 0 +0 0 +0 0 +0.01 0 +0.02 0 +0.1 0 +0.2 0 +0.3 0 +0.33 0 +0.333 0 +1 1 +1.0 1 +1.000000000000000000 1 +1.12 1 +1.122 1 +2 2 +2 2 +3.14 3 +3.14 3 +3.14 3 +3.140 4 +10 10 +20 20 +100 100 +124.00 124 +125.2 125 +200 200 +1234567890.1234567800 1234567890 +PREHOOK: query: SELECT * FROM DECIMAL_3 ORDER BY key DESC, value DESC +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_3 +#### A masked pattern was here #### +POSTHOOK: query: SELECT * FROM DECIMAL_3 ORDER BY key DESC, value DESC +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_3 +#### A masked pattern was here #### +1234567890.1234567800 1234567890 +200 200 +125.2 125 +124.00 124 +100 100 +20 20 +10 10 +3.140 4 +3.14 3 +3.14 3 +3.14 3 +2 2 +2 2 +1.122 1 +1.12 1 +1.000000000000000000 1 +1.0 1 +1 1 +0.333 0 +0.33 0 +0.3 0 +0.2 0 +0.1 0 +0.02 0 +0.01 0 +0 0 +0 0 +0.000000000000000000 0 +-0.3 0 +-0.33 0 +-0.333 0 +-1.12 -1 +-1.12 -1 +-1.122 -11 +-1255.49 -1255 +-4400 4400 +-1234567890.1234567890 -1234567890 +NULL 0 +PREHOOK: query: SELECT * FROM DECIMAL_3 ORDER BY key, value +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_3 +#### A masked pattern was here #### +POSTHOOK: query: SELECT * FROM DECIMAL_3 ORDER BY key, value +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_3 +#### A masked pattern was here #### +NULL 0 +-1234567890.1234567890 -1234567890 +-4400 4400 +-1255.49 -1255 +-1.122 -11 +-1.12 -1 +-1.12 -1 +-0.333 0 +-0.33 0 +-0.3 0 +0.000000000000000000 0 +0 0 +0 0 +0.01 0 +0.02 0 +0.1 0 +0.2 0 +0.3 0 +0.33 0 +0.333 0 +1 1 +1.0 1 +1.000000000000000000 1 +1.12 1 +1.122 1 +2 2 +2 2 +3.14 3 +3.14 3 +3.14 3 +3.140 4 +10 10 +20 20 +100 100 +124.00 124 +125.2 125 +200 200 +1234567890.1234567800 1234567890 +PREHOOK: query: SELECT DISTINCT key FROM DECIMAL_3 ORDER BY key +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_3 +#### A masked pattern was here #### +POSTHOOK: query: SELECT DISTINCT key FROM DECIMAL_3 ORDER BY key +POSTHOOK: type: QUERY +POSTHOOK: Input: 
default@decimal_3 +#### A masked pattern was here #### +NULL +-1234567890.1234567890 +-4400 +-1255.49 +-1.122 +-1.12 +-0.333 +-0.33 +-0.3 +0.000000000000000000 +0.01 +0.02 +0.1 +0.2 +0.3 +0.33 +0.333 +1 +1.12 +1.122 +2 +3.14 +10 +20 +100 +124.00 +125.2 +200 +1234567890.1234567800 +PREHOOK: query: SELECT key, sum(value) FROM DECIMAL_3 GROUP BY key ORDER BY key +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_3 +#### A masked pattern was here #### +POSTHOOK: query: SELECT key, sum(value) FROM DECIMAL_3 GROUP BY key ORDER BY key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_3 +#### A masked pattern was here #### +NULL 0 +-1234567890.1234567890 -1234567890 +-4400 4400 +-1255.49 -1255 +-1.122 -11 +-1.12 -2 +-0.333 0 +-0.33 0 +-0.3 0 +0.000000000000000000 0 +0.01 0 +0.02 0 +0.1 0 +0.2 0 +0.3 0 +0.33 0 +0.333 0 +1 3 +1.12 1 +1.122 1 +2 4 +3.14 13 +10 10 +20 20 +100 100 +124.00 124 +125.2 125 +200 200 +1234567890.1234567800 1234567890 +PREHOOK: query: SELECT value, sum(key) FROM DECIMAL_3 GROUP BY value ORDER BY value +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_3 +#### A masked pattern was here #### +POSTHOOK: query: SELECT value, sum(key) FROM DECIMAL_3 GROUP BY value ORDER BY value +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_3 +#### A masked pattern was here #### +-1234567890 -1234567890.1234567890 +-1255 -1255.49 +-11 -1.122 +-1 -2.24 +0 0.330000000000000000 +1 5.242000000000000000 +2 4 +3 9.42 +4 3.140 +10 10 +20 20 +100 100 +124 124.00 +125 125.2 +200 200 +4400 -4400 +1234567890 1234567890.1234567800 +PREHOOK: query: SELECT * FROM DECIMAL_3 a JOIN DECIMAL_3 b ON (a.key = b.key) ORDER BY a.key, a.value, b.value +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_3 +#### A masked pattern was here #### +POSTHOOK: query: SELECT * FROM DECIMAL_3 a JOIN DECIMAL_3 b ON (a.key = b.key) ORDER BY a.key, a.value, b.value +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_3 +#### A masked pattern was here #### +-1234567890.1234567890 -1234567890 -1234567890.1234567890 -1234567890 +-4400 4400 -4400 4400 +-1255.49 -1255 -1255.49 -1255 +-1.122 -11 -1.122 -11 +-1.12 -1 -1.12 -1 +-1.12 -1 -1.12 -1 +-1.12 -1 -1.12 -1 +-1.12 -1 -1.12 -1 +-0.333 0 -0.333 0 +-0.33 0 -0.33 0 +-0.3 0 -0.3 0 +0.000000000000000000 0 0.000000000000000000 0 +0 0 0 0 +0 0 0 0 +0 0 0 0 +0 0 0 0 +0.01 0 0.01 0 +0.02 0 0.02 0 +0.1 0 0.1 0 +0.2 0 0.2 0 +0.3 0 0.3 0 +0.33 0 0.33 0 +0.333 0 0.333 0 +1 1 1 1 +1.0 1 1.0 1 +1.000000000000000000 1 1.000000000000000000 1 +1.12 1 1.12 1 +1.122 1 1.122 1 +2 2 2 2 +2 2 2 2 +2 2 2 2 +2 2 2 2 +3.14 3 3.14 3 +3.14 3 3.14 3 +3.14 3 3.14 3 +3.14 3 3.14 3 +3.14 3 3.14 3 +3.14 3 3.14 3 +3.14 3 3.14 3 +3.14 3 3.14 3 +3.14 3 3.14 3 +3.140 4 3.140 4 +10 10 10 10 +20 20 20 20 +100 100 100 100 +124.00 124 124.00 124 +125.2 125 125.2 125 +200 200 200 200 +1234567890.1234567800 1234567890 1234567890.1234567800 1234567890 +PREHOOK: query: SELECT * FROM DECIMAL_3 WHERE key=3.14 ORDER BY key, value +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_3 +#### A masked pattern was here #### +POSTHOOK: query: SELECT * FROM DECIMAL_3 WHERE key=3.14 ORDER BY key, value +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_3 +#### A masked pattern was here #### +3.14 3 +3.14 3 +3.14 3 +3.140 4 +PREHOOK: query: SELECT * FROM DECIMAL_3 WHERE key=3.140 ORDER BY key, value +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_3 +#### A masked pattern was here #### +POSTHOOK: query: SELECT * FROM DECIMAL_3 WHERE key=3.140 ORDER BY key, value +POSTHOOK: type: QUERY 
+POSTHOOK: Input: default@decimal_3 +#### A masked pattern was here #### +3.14 3 +3.14 3 +3.14 3 +3.140 4 +PREHOOK: query: DROP TABLE DECIMAL_3_txt +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@decimal_3_txt +PREHOOK: Output: default@decimal_3_txt +POSTHOOK: query: DROP TABLE DECIMAL_3_txt +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@decimal_3_txt +POSTHOOK: Output: default@decimal_3_txt +PREHOOK: query: DROP TABLE DECIMAL_3 +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@decimal_3 +PREHOOK: Output: default@decimal_3 +POSTHOOK: query: DROP TABLE DECIMAL_3 +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@decimal_3 +POSTHOOK: Output: default@decimal_3 Index: ql/src/test/results/clientpositive/vector_decimal_4.q.out =================================================================== --- ql/src/test/results/clientpositive/vector_decimal_4.q.out (revision 0) +++ ql/src/test/results/clientpositive/vector_decimal_4.q.out (working copy) @@ -0,0 +1,250 @@ +PREHOOK: query: DROP TABLE IF EXISTS DECIMAL_4_1 +PREHOOK: type: DROPTABLE +POSTHOOK: query: DROP TABLE IF EXISTS DECIMAL_4_1 +POSTHOOK: type: DROPTABLE +PREHOOK: query: DROP TABLE IF EXISTS DECIMAL_4_2 +PREHOOK: type: DROPTABLE +POSTHOOK: query: DROP TABLE IF EXISTS DECIMAL_4_2 +POSTHOOK: type: DROPTABLE +PREHOOK: query: CREATE TABLE DECIMAL_4_1(key decimal(35,25), value int) +ROW FORMAT DELIMITED + FIELDS TERMINATED BY ' ' +STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@DECIMAL_4_1 +POSTHOOK: query: CREATE TABLE DECIMAL_4_1(key decimal(35,25), value int) +ROW FORMAT DELIMITED + FIELDS TERMINATED BY ' ' +STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@DECIMAL_4_1 +PREHOOK: query: CREATE TABLE DECIMAL_4_2(key decimal(35,25), value decimal(35,25)) +STORED AS ORC +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@DECIMAL_4_2 +POSTHOOK: query: CREATE TABLE DECIMAL_4_2(key decimal(35,25), value decimal(35,25)) +STORED AS ORC +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@DECIMAL_4_2 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv7.txt' INTO TABLE DECIMAL_4_1 +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@decimal_4_1 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv7.txt' INTO TABLE DECIMAL_4_1 +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@decimal_4_1 +PREHOOK: query: INSERT OVERWRITE TABLE DECIMAL_4_2 SELECT key, key * 3 FROM DECIMAL_4_1 +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_4_1 +PREHOOK: Output: default@decimal_4_2 +POSTHOOK: query: INSERT OVERWRITE TABLE DECIMAL_4_2 SELECT key, key * 3 FROM DECIMAL_4_1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_4_1 +POSTHOOK: Output: default@decimal_4_2 +POSTHOOK: Lineage: decimal_4_2.key SIMPLE [(decimal_4_1)decimal_4_1.FieldSchema(name:key, type:decimal(35,25), comment:null), ] +POSTHOOK: Lineage: decimal_4_2.value EXPRESSION [(decimal_4_1)decimal_4_1.FieldSchema(name:key, type:decimal(35,25), comment:null), ] +PREHOOK: query: SELECT * FROM DECIMAL_4_1 ORDER BY key, value +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_4_1 +#### A masked pattern was here #### +POSTHOOK: query: SELECT * FROM DECIMAL_4_1 ORDER BY key, value +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_4_1 +#### A masked pattern was here #### +NULL 0 +-1234567890.1234567890 
-1234567890 +-4400 4400 +-1255.49 -1255 +-1.122 -11 +-1.12 -1 +-1.12 -1 +-0.333 0 +-0.33 0 +-0.3 0 +0.0000000000000000000000000 0 +0 0 +0 0 +0.01 0 +0.02 0 +0.1 0 +0.2 0 +0.3 0 +0.33 0 +0.333 0 +0.9999999999999999999999999 1 +1 1 +1.0 1 +1.12 1 +1.122 1 +2 2 +2 2 +3.14 3 +3.14 3 +3.14 3 +3.140 4 +10 10 +20 20 +100 100 +124.00 124 +125.2 125 +200 200 +1234567890.1234567800 1234567890 +PREHOOK: query: SELECT * FROM DECIMAL_4_2 ORDER BY key, value +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_4_2 +#### A masked pattern was here #### +POSTHOOK: query: SELECT * FROM DECIMAL_4_2 ORDER BY key, value +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_4_2 +#### A masked pattern was here #### +NULL NULL +-1234567890.1234567890 -3703703670.3703703670 +-4400 -13200 +-1255.49 -3766.47 +-1.122 -3.366 +-1.12 -3.36 +-1.12 -3.36 +-0.333 -0.999 +-0.33 -0.99 +-0.3 -0.9 +0.0000000000000000000000000 0.0000000000000000000000000 +0 0 +0 0 +0.01 0.03 +0.02 0.06 +0.1 0.3 +0.2 0.6 +0.3 0.9 +0.33 0.99 +0.333 0.999 +0.9999999999999999999999999 2.9999999999999999999999997 +1 3 +1.0 3.0 +1.12 3.36 +1.122 3.366 +2 6 +2 6 +3.14 9.42 +3.14 9.42 +3.14 9.42 +3.140 9.420 +10 30 +20 60 +100 300 +124.00 372.00 +125.2 375.6 +200 600 +1234567890.1234567800 3703703670.3703703400 +PREHOOK: query: SELECT * FROM DECIMAL_4_2 ORDER BY key +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_4_2 +#### A masked pattern was here #### +POSTHOOK: query: SELECT * FROM DECIMAL_4_2 ORDER BY key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_4_2 +#### A masked pattern was here #### +NULL NULL +-1234567890.1234567890 -3703703670.3703703670 +-4400 -13200 +-1255.49 -3766.47 +-1.122 -3.366 +-1.12 -3.36 +-1.12 -3.36 +-0.333 -0.999 +-0.33 -0.99 +-0.3 -0.9 +0.0000000000000000000000000 0.0000000000000000000000000 +0 0 +0 0 +0.01 0.03 +0.02 0.06 +0.1 0.3 +0.2 0.6 +0.3 0.9 +0.33 0.99 +0.333 0.999 +0.9999999999999999999999999 2.9999999999999999999999997 +1 3 +1.0 3.0 +1.12 3.36 +1.122 3.366 +2 6 +2 6 +3.14 9.42 +3.14 9.42 +3.14 9.42 +3.140 9.420 +10 30 +20 60 +100 300 +124.00 372.00 +125.2 375.6 +200 600 +1234567890.1234567800 3703703670.3703703400 +PREHOOK: query: SELECT * FROM DECIMAL_4_2 ORDER BY key, value +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_4_2 +#### A masked pattern was here #### +POSTHOOK: query: SELECT * FROM DECIMAL_4_2 ORDER BY key, value +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_4_2 +#### A masked pattern was here #### +NULL NULL +-1234567890.1234567890 -3703703670.3703703670 +-4400 -13200 +-1255.49 -3766.47 +-1.122 -3.366 +-1.12 -3.36 +-1.12 -3.36 +-0.333 -0.999 +-0.33 -0.99 +-0.3 -0.9 +0.0000000000000000000000000 0.0000000000000000000000000 +0 0 +0 0 +0.01 0.03 +0.02 0.06 +0.1 0.3 +0.2 0.6 +0.3 0.9 +0.33 0.99 +0.333 0.999 +0.9999999999999999999999999 2.9999999999999999999999997 +1 3 +1.0 3.0 +1.12 3.36 +1.122 3.366 +2 6 +2 6 +3.14 9.42 +3.14 9.42 +3.14 9.42 +3.140 9.420 +10 30 +20 60 +100 300 +124.00 372.00 +125.2 375.6 +200 600 +1234567890.1234567800 3703703670.3703703400 +PREHOOK: query: DROP TABLE DECIMAL_4_1 +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@decimal_4_1 +PREHOOK: Output: default@decimal_4_1 +POSTHOOK: query: DROP TABLE DECIMAL_4_1 +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@decimal_4_1 +POSTHOOK: Output: default@decimal_4_1 +PREHOOK: query: DROP TABLE DECIMAL_4_2 +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@decimal_4_2 +PREHOOK: Output: default@decimal_4_2 +POSTHOOK: query: DROP TABLE DECIMAL_4_2 +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: 
default@decimal_4_2 +POSTHOOK: Output: default@decimal_4_2 Index: ql/src/test/results/clientpositive/vector_decimal_5.q.out =================================================================== --- ql/src/test/results/clientpositive/vector_decimal_5.q.out (revision 0) +++ ql/src/test/results/clientpositive/vector_decimal_5.q.out (working copy) @@ -0,0 +1,239 @@ +PREHOOK: query: DROP TABLE IF EXISTS DECIMAL_5_txt +PREHOOK: type: DROPTABLE +POSTHOOK: query: DROP TABLE IF EXISTS DECIMAL_5_txt +POSTHOOK: type: DROPTABLE +PREHOOK: query: DROP TABLE IF EXISTS DECIMAL_5 +PREHOOK: type: DROPTABLE +POSTHOOK: query: DROP TABLE IF EXISTS DECIMAL_5 +POSTHOOK: type: DROPTABLE +PREHOOK: query: CREATE TABLE DECIMAL_5_txt(key decimal(10,5), value int) +ROW FORMAT DELIMITED + FIELDS TERMINATED BY ' ' +STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@DECIMAL_5_txt +POSTHOOK: query: CREATE TABLE DECIMAL_5_txt(key decimal(10,5), value int) +ROW FORMAT DELIMITED + FIELDS TERMINATED BY ' ' +STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@DECIMAL_5_txt +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv7.txt' INTO TABLE DECIMAL_5_txt +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@decimal_5_txt +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv7.txt' INTO TABLE DECIMAL_5_txt +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@decimal_5_txt +PREHOOK: query: CREATE TABLE DECIMAL_5(key decimal(10,5), value int) +STORED AS ORC +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@DECIMAL_5 +POSTHOOK: query: CREATE TABLE DECIMAL_5(key decimal(10,5), value int) +STORED AS ORC +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@DECIMAL_5 +PREHOOK: query: INSERT OVERWRITE TABLE DECIMAL_5 SELECT * FROM DECIMAL_5_txt +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_5_txt +PREHOOK: Output: default@decimal_5 +POSTHOOK: query: INSERT OVERWRITE TABLE DECIMAL_5 SELECT * FROM DECIMAL_5_txt +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_5_txt +POSTHOOK: Output: default@decimal_5 +POSTHOOK: Lineage: decimal_5.key SIMPLE [(decimal_5_txt)decimal_5_txt.FieldSchema(name:key, type:decimal(10,5), comment:null), ] +POSTHOOK: Lineage: decimal_5.value SIMPLE [(decimal_5_txt)decimal_5_txt.FieldSchema(name:value, type:int, comment:null), ] +PREHOOK: query: SELECT key FROM DECIMAL_5 ORDER BY key +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_5 +#### A masked pattern was here #### +POSTHOOK: query: SELECT key FROM DECIMAL_5 ORDER BY key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_5 +#### A masked pattern was here #### +NULL +NULL +NULL +-4400 +-1255.49 +-1.122 +-1.12 +-1.12 +-0.333 +-0.33 +-0.3 +0.00000 +0 +0 +0.01 +0.02 +0.1 +0.2 +0.3 +0.33 +0.333 +1 +1.0 +1.00000 +1.12 +1.122 +2 +2 +3.14 +3.14 +3.14 +3.140 +10 +20 +100 +124.00 +125.2 +200 +PREHOOK: query: SELECT DISTINCT key FROM DECIMAL_5 ORDER BY key +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_5 +#### A masked pattern was here #### +POSTHOOK: query: SELECT DISTINCT key FROM DECIMAL_5 ORDER BY key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_5 +#### A masked pattern was here #### +NULL +-4400 +-1255.49 +-1.122 +-1.12 +-0.333 +-0.33 +-0.3 +0.00000 +0.01 +0.02 +0.1 +0.2 +0.3 +0.33 +0.333 +1 +1.12 +1.122 +2 +3.14 +10 +20 +100 +124.00 +125.2 +200 
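The cast queries that follow exercise the same rule against narrower targets: keys whose integer part cannot fit come back as NULL (for example -4400 into decimal(6,3), which allows only three digits before the point), while the rest are rounded half-up to the target scale (-1255.49 into the default decimal(10,0) yields -1255). Under the same assumptions as the earlier sketch, a short usage example:

import java.math.BigDecimal;
import java.math.RoundingMode;

public class NarrowCastSketch {
    static BigDecimal enforce(BigDecimal v, int precision, int scale) {
        BigDecimal rounded = v.setScale(scale, RoundingMode.HALF_UP); // assumed rounding mode
        return (rounded.precision() - rounded.scale() > precision - scale) ? null : rounded;
    }

    public static void main(String[] args) {
        // -4400 needs 4 integer digits but decimal(6,3) allows 3 -> null (NULL row)
        System.out.println(enforce(new BigDecimal("-4400"), 6, 3));
        // -1255.49 rounded to scale 0 -> -1255, which fits decimal(10,0)
        System.out.println(enforce(new BigDecimal("-1255.49"), 10, 0));
    }
}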
+PREHOOK: query: SELECT cast(key as decimal) FROM DECIMAL_5 +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_5 +#### A masked pattern was here #### +POSTHOOK: query: SELECT cast(key as decimal) FROM DECIMAL_5 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_5 +#### A masked pattern was here #### +-4400 +NULL +0 +0 +100 +10 +1 +0 +0 +200 +20 +2 +0 +0 +0 +0 +0 +0 +0 +0 +0 +1 +2 +3 +-1 +-1 +-1 +1 +1 +124 +125 +-1255 +3 +3 +3 +1 +NULL +NULL +PREHOOK: query: SELECT cast(key as decimal(6,3)) FROM DECIMAL_5 +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_5 +#### A masked pattern was here #### +POSTHOOK: query: SELECT cast(key as decimal(6,3)) FROM DECIMAL_5 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_5 +#### A masked pattern was here #### +NULL +NULL +0.000 +0 +100 +10 +1 +0.1 +0.01 +200 +20 +2 +0 +0.2 +0.02 +0.3 +0.33 +0.333 +-0.3 +-0.33 +-0.333 +1.0 +2 +3.14 +-1.12 +-1.12 +-1.122 +1.12 +1.122 +124.00 +125.2 +NULL +3.14 +3.14 +3.140 +1.000 +NULL +NULL +PREHOOK: query: DROP TABLE DECIMAL_5_txt +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@decimal_5_txt +PREHOOK: Output: default@decimal_5_txt +POSTHOOK: query: DROP TABLE DECIMAL_5_txt +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@decimal_5_txt +POSTHOOK: Output: default@decimal_5_txt +PREHOOK: query: DROP TABLE DECIMAL_5 +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@decimal_5 +PREHOOK: Output: default@decimal_5 +POSTHOOK: query: DROP TABLE DECIMAL_5 +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@decimal_5 +POSTHOOK: Output: default@decimal_5 Index: ql/src/test/results/clientpositive/vector_decimal_6.q.out =================================================================== --- ql/src/test/results/clientpositive/vector_decimal_6.q.out (revision 0) +++ ql/src/test/results/clientpositive/vector_decimal_6.q.out (working copy) @@ -0,0 +1,303 @@ +PREHOOK: query: DROP TABLE IF EXISTS DECIMAL_6_1_txt +PREHOOK: type: DROPTABLE +POSTHOOK: query: DROP TABLE IF EXISTS DECIMAL_6_1_txt +POSTHOOK: type: DROPTABLE +PREHOOK: query: DROP TABLE IF EXISTS DECIMAL_6_1 +PREHOOK: type: DROPTABLE +POSTHOOK: query: DROP TABLE IF EXISTS DECIMAL_6_1 +POSTHOOK: type: DROPTABLE +PREHOOK: query: DROP TABLE IF EXISTS DECIMAL_6_2_txt +PREHOOK: type: DROPTABLE +POSTHOOK: query: DROP TABLE IF EXISTS DECIMAL_6_2_txt +POSTHOOK: type: DROPTABLE +PREHOOK: query: DROP TABLE IF EXISTS DECIMAL_6_2 +PREHOOK: type: DROPTABLE +POSTHOOK: query: DROP TABLE IF EXISTS DECIMAL_6_2 +POSTHOOK: type: DROPTABLE +PREHOOK: query: DROP TABLE IF EXISTS DECIMAL_6_3_txt +PREHOOK: type: DROPTABLE +POSTHOOK: query: DROP TABLE IF EXISTS DECIMAL_6_3_txt +POSTHOOK: type: DROPTABLE +PREHOOK: query: DROP TABLE IF EXISTS DECIMAL_6_3 +PREHOOK: type: DROPTABLE +POSTHOOK: query: DROP TABLE IF EXISTS DECIMAL_6_3 +POSTHOOK: type: DROPTABLE +PREHOOK: query: CREATE TABLE DECIMAL_6_1_txt(key decimal(10,5), value int) +ROW FORMAT DELIMITED + FIELDS TERMINATED BY ' ' +STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@DECIMAL_6_1_txt +POSTHOOK: query: CREATE TABLE DECIMAL_6_1_txt(key decimal(10,5), value int) +ROW FORMAT DELIMITED + FIELDS TERMINATED BY ' ' +STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@DECIMAL_6_1_txt +PREHOOK: query: CREATE TABLE DECIMAL_6_2_txt(key decimal(17,4), value int) +ROW FORMAT DELIMITED + FIELDS TERMINATED BY ' ' +STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: 
default@DECIMAL_6_2_txt +POSTHOOK: query: CREATE TABLE DECIMAL_6_2_txt(key decimal(17,4), value int) +ROW FORMAT DELIMITED + FIELDS TERMINATED BY ' ' +STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@DECIMAL_6_2_txt +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv9.txt' INTO TABLE DECIMAL_6_1_txt +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@decimal_6_1_txt +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv9.txt' INTO TABLE DECIMAL_6_1_txt +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@decimal_6_1_txt +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv9.txt' INTO TABLE DECIMAL_6_2_txt +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@decimal_6_2_txt +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv9.txt' INTO TABLE DECIMAL_6_2_txt +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@decimal_6_2_txt +PREHOOK: query: CREATE TABLE DECIMAL_6_1(key decimal(10,5), value int) +STORED AS ORC +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@DECIMAL_6_1 +POSTHOOK: query: CREATE TABLE DECIMAL_6_1(key decimal(10,5), value int) +STORED AS ORC +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@DECIMAL_6_1 +PREHOOK: query: CREATE TABLE DECIMAL_6_2(key decimal(17,4), value int) +STORED AS ORC +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@DECIMAL_6_2 +POSTHOOK: query: CREATE TABLE DECIMAL_6_2(key decimal(17,4), value int) +STORED AS ORC +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@DECIMAL_6_2 +PREHOOK: query: INSERT OVERWRITE TABLE DECIMAL_6_1 SELECT * FROM DECIMAL_6_1_txt +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_6_1_txt +PREHOOK: Output: default@decimal_6_1 +POSTHOOK: query: INSERT OVERWRITE TABLE DECIMAL_6_1 SELECT * FROM DECIMAL_6_1_txt +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_6_1_txt +POSTHOOK: Output: default@decimal_6_1 +POSTHOOK: Lineage: decimal_6_1.key SIMPLE [(decimal_6_1_txt)decimal_6_1_txt.FieldSchema(name:key, type:decimal(10,5), comment:null), ] +POSTHOOK: Lineage: decimal_6_1.value SIMPLE [(decimal_6_1_txt)decimal_6_1_txt.FieldSchema(name:value, type:int, comment:null), ] +PREHOOK: query: INSERT OVERWRITE TABLE DECIMAL_6_2 SELECT * FROM DECIMAL_6_2_txt +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_6_2_txt +PREHOOK: Output: default@decimal_6_2 +POSTHOOK: query: INSERT OVERWRITE TABLE DECIMAL_6_2 SELECT * FROM DECIMAL_6_2_txt +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_6_2_txt +POSTHOOK: Output: default@decimal_6_2 +POSTHOOK: Lineage: decimal_6_2.key SIMPLE [(decimal_6_2_txt)decimal_6_2_txt.FieldSchema(name:key, type:decimal(17,4), comment:null), ] +POSTHOOK: Lineage: decimal_6_2.value SIMPLE [(decimal_6_2_txt)decimal_6_2_txt.FieldSchema(name:value, type:int, comment:null), ] +PREHOOK: query: SELECT * FROM DECIMAL_6_1 ORDER BY key, value +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_6_1 +#### A masked pattern was here #### +POSTHOOK: query: SELECT * FROM DECIMAL_6_1 ORDER BY key, value +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_6_1 +#### A masked pattern was here #### +NULL -1234567890 +NULL 0 +NULL 3 +NULL 4 +NULL 1234567890 +-4400 4400 +-1255.49 -1255 +-1.122 -11 +-1.12 -1 +-0.333 0 +-0.3 0 +0.00000 0 +0 
0 +0.333 0 +1.0 1 +1.00000 1 +1.12 1 +1.122 1 +2 2 +3.14 3 +3.14 3 +3.140 4 +10 10 +10.73433 5 +124.00 124 +125.2 125 +23232.23435 2 +PREHOOK: query: SELECT * FROM DECIMAL_6_2 ORDER BY key, value +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_6_2 +#### A masked pattern was here #### +POSTHOOK: query: SELECT * FROM DECIMAL_6_2 ORDER BY key, value +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_6_2 +#### A masked pattern was here #### +NULL 0 +-1234567890.1235 -1234567890 +-4400 4400 +-1255.49 -1255 +-1.122 -11 +-1.12 -1 +-0.333 0 +-0.3 0 +0.0000 0 +0 0 +0.333 0 +1.0 1 +1.0000 1 +1.12 1 +1.122 1 +2 2 +3.14 3 +3.14 3 +3.140 4 +10 10 +10.7343 5 +124.00 124 +125.2 125 +23232.2344 2 +2389432.2375 3 +2389432.2375 4 +1234567890.1235 1234567890 +PREHOOK: query: SELECT T.key from ( + SELECT key, value from DECIMAL_6_1 + UNION ALL + SELECT key, value from DECIMAL_6_2 +) T order by T.key +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_6_1 +PREHOOK: Input: default@decimal_6_2 +#### A masked pattern was here #### +POSTHOOK: query: SELECT T.key from ( + SELECT key, value from DECIMAL_6_1 + UNION ALL + SELECT key, value from DECIMAL_6_2 +) T order by T.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_6_1 +POSTHOOK: Input: default@decimal_6_2 +#### A masked pattern was here #### +NULL +NULL +NULL +NULL +NULL +NULL +-1234567890.1235 +-4400 +-4400 +-1255.49 +-1255.49 +-1.122 +-1.122 +-1.12 +-1.12 +-0.333 +-0.333 +-0.3 +-0.3 +0.00000 +0.0000 +0 +0 +0.333 +0.333 +1.0 +1.0 +1.0000 +1.00000 +1.12 +1.12 +1.122 +1.122 +2 +2 +3.14 +3.14 +3.14 +3.14 +3.140 +3.140 +10 +10 +10.7343 +10.73433 +124.00 +124.00 +125.2 +125.2 +23232.23435 +23232.2344 +2389432.2375 +2389432.2375 +1234567890.1235 +PREHOOK: query: CREATE TABLE DECIMAL_6_3 STORED AS ORC AS SELECT key + 5.5 AS k, value * 11 AS v from DECIMAL_6_1 ORDER BY v +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: default@decimal_6_1 +PREHOOK: Output: database:default +PREHOOK: Output: default@DECIMAL_6_3 +POSTHOOK: query: CREATE TABLE DECIMAL_6_3 STORED AS ORC AS SELECT key + 5.5 AS k, value * 11 AS v from DECIMAL_6_1 ORDER BY v +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: default@decimal_6_1 +POSTHOOK: Output: database:default +POSTHOOK: Output: default@DECIMAL_6_3 +PREHOOK: query: desc DECIMAL_6_3 +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@decimal_6_3 +POSTHOOK: query: desc DECIMAL_6_3 +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@decimal_6_3 +k double +v int +PREHOOK: query: SELECT * FROM DECIMAL_6_3 ORDER BY k, v +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_6_3 +#### A masked pattern was here #### +POSTHOOK: query: SELECT * FROM DECIMAL_6_3 ORDER BY k, v +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_6_3 +#### A masked pattern was here #### +NULL -695344902 +NULL 0 +NULL 33 +NULL 44 +NULL 695344902 +-4394.5 48400 +-1249.99 -13805 +4.378 -121 +4.38 -11 +5.167 0 +5.2 0 +5.5 0 +5.5 0 +5.833 0 +6.5 11 +6.5 11 +6.62 11 +6.622 11 +7.5 22 +8.64 33 +8.64 33 +8.64 44 +15.5 110 +16.23433 55 +129.5 1364 +130.7 1375 +23237.73435 22 Index: ql/src/test/results/clientpositive/vector_decimal_aggregate.q.out =================================================================== --- ql/src/test/results/clientpositive/vector_decimal_aggregate.q.out (revision 1637277) +++ ql/src/test/results/clientpositive/vector_decimal_aggregate.q.out (working copy) @@ -106,14 +106,14 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@decimal_vgby #### A masked pattern was here #### -NULL 3072 9318.4351351351 
-4298.1513513514 5018444.1081079808 3072 11160.71538461538500 -5147.90769230769300 6010604.30769230735360 --3728 6 5831542.2692483780 -3367.6517567568 5817556.0411483778 6 6984454.21109769200000 -4033.445769230769 6967702.86724384584710 --563 2 -515.6210729730 -3367.6517567568 -3883.2728297298 2 -617.56077692307690 -4033.445769230769 -4651.00654615384590 -762 2 5831542.2692483780 1531.2194054054 5833073.4886537834 2 6984454.21109769200000 1833.9456923076925 6986288.15678999969250 -6981 3 5831542.269248378 -515.6210729730 5830511.0271024320 3 6984454.211097692 -617.56077692307690 6983219.08954384584620 -253665376 1024 9767.0054054054 -9779.5486486487 -347484.0818378374 1024 11697.96923076923100 -11712.99230769231000 -416182.64030769233089 -528534767 1024 5831542.2692483780 -9777.1594594595 11646372.8607481068 1024 6984454.21109769200000 -11710.13076923077100 13948892.79980307629003 -626923679 1024 9723.4027027027 -9778.9513513514 10541.0525297287 1024 11645.74615384615400 -11712.27692307692300 12625.04759999997746 +NULL 3072 9318.4351351351 -4298.1513513514 5018444.1081079808 3072 11160.715384615385 -5147.907692307693 6010604.3076923073536 +-3728 6 5831542.269248378 -3367.6517567568 5817556.0411483778 6 6984454.211097692 -4033.445769230769 6967702.8672438458471 +-563 2 -515.6210729730 -3367.6517567568 -3883.2728297298 2 -617.5607769230769 -4033.445769230769 -4651.0065461538459 +762 2 5831542.269248378 1531.2194054054 5833073.4886537834 2 6984454.211097692 1833.9456923076925 6986288.1567899996925 +6981 3 5831542.269248378 -515.6210729730 5830511.0271024320 3 6984454.211097692 -617.5607769230769 6983219.0895438458462 +253665376 1024 9767.0054054054 -9779.5486486487 -347484.0818378374 1024 11697.969230769231 -11712.99230769231 -416182.64030769233089 +528534767 1024 5831542.269248378 -9777.1594594595 11646372.8607481068 1024 6984454.211097692 -11710.130769230771 13948892.79980307629003 +626923679 1024 9723.4027027027 -9778.9513513514 10541.0525297287 1024 11645.746153846154 -11712.276923076923 12625.04759999997746 PREHOOK: query: -- Now add the others... 
EXPLAIN SELECT cint, COUNT(cdecimal1), MAX(cdecimal1), MIN(cdecimal1), SUM(cdecimal1), AVG(cdecimal1), STDDEV_POP(cdecimal1), STDDEV_SAMP(cdecimal1), @@ -204,11 +204,11 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@decimal_vgby #### A masked pattern was here #### -NULL 3072 9318.4351351351 -4298.1513513514 5018444.1081079808 1633.60810810806667 5695.483082135364 5696.4103077145055 3072 11160.71538461538500 -5147.90769230769300 6010604.30769230735360 1956.576923076922966667 6821.495748565159 6822.606289190924 --3728 6 5831542.2692483780 -3367.6517567568 5817556.0411483778 969592.67352472963333 2174330.2092403853 2381859.406131774 6 6984454.21109769200000 -4033.445769230769 6967702.86724384584710 1161283.811207307641183333 2604201.2704476737 2852759.5602156054 --563 2 -515.6210729730 -3367.6517567568 -3883.2728297298 -1941.6364148649 1426.0153418918999 2016.6902366556308 2 -617.56077692307690 -4033.445769230769 -4651.00654615384590 -2325.50327307692295 1707.9424961538462 2415.395441814127 -762 2 5831542.2692483780 1531.2194054054 5833073.4886537834 2916536.7443268917 2915005.5249214866 4122440.3477364695 2 6984454.21109769200000 1833.9456923076925 6986288.15678999969250 3493144.07839499984625 3491310.1327026924 4937458.140118758 -6981 3 5831542.269248378 -515.6210729730 5830511.0271024320 1943503.67570081066667 2749258.455012492 3367140.1929065133 3 6984454.211097692 -617.56077692307690 6983219.08954384584620 2327739.696514615282066667 3292794.4113115156 4032833.0678006653 -253665376 1024 9767.0054054054 -9779.5486486487 -347484.0818378374 -339.33992366976309 5708.9563478862 5711.745967572779 1024 11697.96923076923100 -11712.99230769231000 -416182.64030769233089 -406.428359675480791885 6837.632716002934 6840.973851172274 -528534767 1024 5831542.2692483780 -9777.1594594595 11646372.8607481068 11373.41099682432305 257528.92988206653 257654.7686043977 1024 6984454.21109769200000 -11710.13076923077100 13948892.79980307629003 13621.965624807691689482 308443.1074570801 308593.82484083984 -626923679 1024 9723.4027027027 -9778.9513513514 10541.0525297287 10.29399661106318 5742.09145323734 5744.897264034267 1024 11645.74615384615400 -11712.27692307692300 12625.04759999997746 12.329148046874977988 6877.318722794877 6880.679250101603 +NULL 3072 9318.4351351351 -4298.1513513514 5018444.1081079808 1633.60810810806667 5695.483082135364 5696.4103077145055 3072 11160.715384615385 -5147.907692307693 6010604.3076923073536 1956.576923076922966667 6821.495748565159 6822.606289190924 +-3728 6 5831542.269248378 -3367.6517567568 5817556.0411483778 969592.67352472963333 2174330.2092403853 2381859.406131774 6 6984454.211097692 -4033.445769230769 6967702.8672438458471 1161283.811207307641183333 2604201.2704476737 2852759.5602156054 +-563 2 -515.6210729730 -3367.6517567568 -3883.2728297298 -1941.6364148649 1426.0153418918999 2016.6902366556308 2 -617.5607769230769 -4033.445769230769 -4651.0065461538459 -2325.50327307692295 1707.9424961538462 2415.395441814127 +762 2 5831542.269248378 1531.2194054054 5833073.4886537834 2916536.7443268917 2915005.5249214866 4122440.3477364695 2 6984454.211097692 1833.9456923076925 6986288.1567899996925 3493144.07839499984625 3491310.1327026924 4937458.140118758 +6981 3 5831542.269248378 -515.6210729730 5830511.0271024320 1943503.67570081066667 2749258.455012492 3367140.1929065133 3 6984454.211097692 -617.5607769230769 6983219.0895438458462 2327739.696514615282066667 3292794.4113115156 4032833.0678006653 +253665376 1024 9767.0054054054 -9779.5486486487 -347484.0818378374 
-339.33992366976309 5708.9563478862 5711.745967572779 1024 11697.969230769231 -11712.99230769231 -416182.64030769233089 -406.428359675480791885 6837.632716002934 6840.973851172274 +528534767 1024 5831542.269248378 -9777.1594594595 11646372.8607481068 11373.41099682432305 257528.92988206653 257654.7686043977 1024 6984454.211097692 -11710.130769230771 13948892.79980307629003 13621.965624807691689482 308443.1074570801 308593.82484083984 +626923679 1024 9723.4027027027 -9778.9513513514 10541.0525297287 10.29399661106318 5742.09145323734 5744.897264034267 1024 11645.746153846154 -11712.276923076923 12625.04759999997746 12.329148046874977988 6877.318722794877 6880.679250101603 Index: ql/src/test/results/clientpositive/vector_decimal_cast.q.out =================================================================== --- ql/src/test/results/clientpositive/vector_decimal_cast.q.out (revision 1637277) +++ ql/src/test/results/clientpositive/vector_decimal_cast.q.out (working copy) @@ -46,13 +46,13 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@alltypesorc #### A masked pattern was here #### --13326.0 528534767 true 1969-12-31 15:59:46.674 -13326.0000000000 528534767.00000000000000 1.00 -13 --15813.0 528534767 true 1969-12-31 15:59:55.787 -15813.0000000000 528534767.00000000000000 1.00 -4 --9566.0 528534767 true 1969-12-31 15:59:44.187 -9566.0000000000 528534767.00000000000000 1.00 -16 -15007.0 528534767 true 1969-12-31 15:59:50.434 15007.0000000000 528534767.00000000000000 1.00 -10 -7021.0 528534767 true 1969-12-31 16:00:15.007 7021.0000000000 528534767.00000000000000 1.00 15 -4963.0 528534767 true 1969-12-31 16:00:07.021 4963.0000000000 528534767.00000000000000 1.00 7 --7824.0 528534767 true 1969-12-31 16:00:04.963 -7824.0000000000 528534767.00000000000000 1.00 5 --15431.0 528534767 true 1969-12-31 15:59:52.176 -15431.0000000000 528534767.00000000000000 1.00 -8 --15549.0 528534767 true 1969-12-31 15:59:44.569 -15549.0000000000 528534767.00000000000000 1.00 -15 -5780.0 528534767 true 1969-12-31 15:59:44.451 5780.0000000000 528534767.00000000000000 1.00 -16 +-13326.0 528534767 true 1969-12-31 15:59:46.674 -13326.0 528534767 1 -13 +-15813.0 528534767 true 1969-12-31 15:59:55.787 -15813.0 528534767 1 -4 +-9566.0 528534767 true 1969-12-31 15:59:44.187 -9566.0 528534767 1 -16 +15007.0 528534767 true 1969-12-31 15:59:50.434 15007.0 528534767 1 -10 +7021.0 528534767 true 1969-12-31 16:00:15.007 7021.0 528534767 1 15 +4963.0 528534767 true 1969-12-31 16:00:07.021 4963.0 528534767 1 7 +-7824.0 528534767 true 1969-12-31 16:00:04.963 -7824.0 528534767 1 5 +-15431.0 528534767 true 1969-12-31 15:59:52.176 -15431.0 528534767 1 -8 +-15549.0 528534767 true 1969-12-31 15:59:44.569 -15549.0 528534767 1 -15 +5780.0 528534767 true 1969-12-31 15:59:44.451 5780.0 528534767 1 -16 Index: ql/src/test/results/clientpositive/vector_decimal_expressions.q.out =================================================================== --- ql/src/test/results/clientpositive/vector_decimal_expressions.q.out (revision 1637277) +++ ql/src/test/results/clientpositive/vector_decimal_expressions.q.out (working copy) @@ -56,13 +56,13 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@decimal_test #### A masked pattern was here #### -19699.41746361742300 -12507.91330561334600 0.8351496686995997 2.8303425077026896E7 3.6405405405 8963 10735 -17 8963 true 10735.776923076923 8963.641 10735.776923076923 1969-12-31 18:29:23.64054054 -9216.33970893968500 -5851.80644490647000 0.8353975893550668 6195112.1797296945 3.6243243243 4193 5022 -98 4193 true 
5022.715384615385 4193.6245 5022.715384615385 1969-12-31 17:09:53.624324324 -6514.84033264034640 -4136.52120582119280 0.8355907765708067 3095563.9418919063 4.3864864865 2964 3550 -34 2964 true 3550.4538461538464 2964.3865 3550.4538461538464 1969-12-31 16:49:24.386486486 -7587.30145530147700 -4817.46777546775400 0.8354976172734904 4198623.24324327 2.3783783784 3452 4134 38 3452 true 4134.923076923077 3452.3784 4134.923076923077 1969-12-31 16:57:32.378378378 -19197.97297297300000 -12189.52702702700000 0.835155361813429 2.6880848817567654E7 5.4729729730 8735 10462 -34 8735 true 10462.5 8735.473 10462.5 1969-12-31 18:25:35.472972973 -17098.99459459460000 -10856.80540540540000 0.8351828165813104 2.132423090270272E7 0.3945945946 7780 9318 102 7780 true 9318.6 7780.3945 9318.6 1969-12-31 18:09:40.394594594 -12433.72307692307700 -7894.64615384615400 0.8352770361086894 1.12754688E7 7.6000000000 5657 6776 120 5657 true 6776.123076923077 5657.6 6776.123076923077 1969-12-31 17:34:17.6 -7247.31683991686200 -4601.59854469852400 0.8355241651897876 3830775.6932432684 7.6783783784 3297 3949 109 3297 true 3949.638461538462 3297.6785 3949.638461538462 1969-12-31 16:54:57.678378378 -14757.17006237004650 -9369.89147609149300 0.8352226654922171 1.5883214124324286E7 4.8162162162 6714 8042 106 6714 true 8042.3538461538465 6714.8164 8042.3538461538465 1969-12-31 17:51:54.816216216 -10964.83201663199300 -6961.99106029108600 0.8353232978714221 8768719.779729689 9.2243243243 4989 5975 87 4989 true 5975.607692307693 4989.224 5975.607692307693 1969-12-31 17:23:09.224324324 +19699.417463617423 -12507.913305613346 0.8351496686995997 2.8303425077026896E7 3.6405405405 8963 10735 -17 8963 true 10735.776923076923 8963.641 10735.776923076923 1969-12-31 18:29:23.64054054 +9216.339708939685 -5851.806444906470 0.8353975893550668 6195112.1797296945 3.6243243243 4193 5022 -98 4193 true 5022.715384615385 4193.6245 5022.715384615385 1969-12-31 17:09:53.624324324 +6514.8403326403464 -4136.5212058211928 0.8355907765708067 3095563.9418919063 4.3864864865 2964 3550 -34 2964 true 3550.4538461538464 2964.3865 3550.4538461538464 1969-12-31 16:49:24.386486486 +7587.301455301477 -4817.467775467754 0.8354976172734904 4198623.24324327 2.3783783784 3452 4134 38 3452 true 4134.923076923077 3452.3784 4134.923076923077 1969-12-31 16:57:32.378378378 +19197.9729729730 -12189.5270270270 0.835155361813429 2.6880848817567654E7 5.4729729730 8735 10462 -34 8735 true 10462.5 8735.473 10462.5 1969-12-31 18:25:35.472972973 +17098.9945945946 -10856.8054054054 0.8351828165813104 2.132423090270272E7 0.3945945946 7780 9318 102 7780 true 9318.6 7780.3945 9318.6 1969-12-31 18:09:40.394594594 +12433.723076923077 -7894.646153846154 0.8352770361086894 1.12754688E7 7.6 5657 6776 120 5657 true 6776.123076923077 5657.6 6776.123076923077 1969-12-31 17:34:17.6 +7247.316839916862 -4601.598544698524 0.8355241651897876 3830775.6932432684 7.6783783784 3297 3949 109 3297 true 3949.638461538462 3297.6785 3949.638461538462 1969-12-31 16:54:57.678378378 +14757.1700623700465 -9369.8914760914930 0.8352226654922171 1.5883214124324286E7 4.8162162162 6714 8042 106 6714 true 8042.3538461538465 6714.8164 8042.3538461538465 1969-12-31 17:51:54.816216216 +10964.832016631993 -6961.991060291086 0.8353232978714221 8768719.779729689 9.2243243243 4989 5975 87 4989 true 5975.607692307693 4989.224 5975.607692307693 1969-12-31 17:23:09.224324324 Index: ql/src/test/results/clientpositive/vector_decimal_math_funcs.q.out =================================================================== --- 
ql/src/test/results/clientpositive/vector_decimal_math_funcs.q.out (revision 1637277) +++ ql/src/test/results/clientpositive/vector_decimal_math_funcs.q.out (working copy) @@ -195,14 +195,14 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@decimal_test #### A masked pattern was here #### --119.4594594595 -119.46 -119 -120 -119 1.316485E-52 NULL NULL NULL NULL NULL NULL NULL NULL 119.459459459500000000 -0.07885666683797002 NaN 0.9968859644388647 NaN -1.5624254815943668 -6844.522849943508 -2.0849608902209606 -119.4594594595 119.4594594595 -1 NULL -9318.4351351351 9318.44 9318 9318 9319 Infinity 9.13974998962673 3.969342986470191 13.185871984999437 NULL 13.185871984999437 173.867220004793 173.867220004793 96.53204201266593 9318.435135135100000000 0.4540668481851705 NaN 0.8909676185918236 NaN 1.5706890126394983 533907.0049096602 162.63737424163023 9318.4351351351 -9318.4351351351 1 -0.9607267417229353 -9318.4351351351 9318.44 9318 9318 9319 Infinity 9.13974998962673 3.969342986470191 13.185871984999437 NULL 13.185871984999437 173.867220004793 173.867220004793 96.53204201266593 9318.435135135100000000 0.4540668481851705 NaN 0.8909676185918236 NaN 1.5706890126394983 533907.0049096602 162.63737424163023 9318.4351351351 -9318.4351351351 1 -0.9607267417229353 -9318.4351351351 9318.44 9318 9318 9319 Infinity 9.13974998962673 3.969342986470191 13.185871984999437 NULL 13.185871984999437 173.867220004793 173.867220004793 96.53204201266593 9318.435135135100000000 0.4540668481851705 NaN 0.8909676185918236 NaN 1.5706890126394983 533907.0049096602 162.63737424163023 9318.4351351351 -9318.4351351351 1 -0.9607267417229353 --4298.1513513514 -4298.15 -4298 -4299 -4298 0.0 NULL NULL NULL NULL NULL NULL NULL NULL 4298.151351351400000000 -0.43730633941118113 NaN 0.899312607223313 NaN -1.5705636686355597 -246265.93214088667 -75.01689283012556 -4298.1513513514 4298.1513513514 -1 NULL --4298.1513513514 -4298.15 -4298 -4299 -4298 0.0 NULL NULL NULL NULL NULL NULL NULL NULL 4298.151351351400000000 -0.43730633941118113 NaN 0.899312607223313 NaN -1.5705636686355597 -246265.93214088667 -75.01689283012556 -4298.1513513514 4298.1513513514 -1 NULL --4298.1513513514 -4298.15 -4298 -4299 -4298 0.0 NULL NULL NULL NULL NULL NULL NULL NULL 4298.151351351400000000 -0.43730633941118113 NaN 0.899312607223313 NaN -1.5705636686355597 -246265.93214088667 -75.01689283012556 -4298.1513513514 4298.1513513514 -1 NULL --4298.1513513514 -4298.15 -4298 -4299 -4298 0.0 NULL NULL NULL NULL NULL NULL NULL NULL 4298.151351351400000000 -0.43730633941118113 NaN 0.899312607223313 NaN -1.5705636686355597 -246265.93214088667 -75.01689283012556 -4298.1513513514 4298.1513513514 -1 NULL --4298.1513513514 -4298.15 -4298 -4299 -4298 0.0 NULL NULL NULL NULL NULL NULL NULL NULL 4298.151351351400000000 -0.43730633941118113 NaN 0.899312607223313 NaN -1.5705636686355597 -246265.93214088667 -75.01689283012556 -4298.1513513514 4298.1513513514 -1 NULL --4298.1513513514 -4298.15 -4298 -4299 -4298 0.0 NULL NULL NULL NULL NULL NULL NULL NULL 4298.151351351400000000 -0.43730633941118113 NaN 0.899312607223313 NaN -1.5705636686355597 -246265.93214088667 -75.01689283012556 -4298.1513513514 4298.1513513514 -1 NULL --4298.1513513514 -4298.15 -4298 -4299 -4298 0.0 NULL NULL NULL NULL NULL NULL NULL NULL 4298.151351351400000000 -0.43730633941118113 NaN 0.899312607223313 NaN -1.5705636686355597 -246265.93214088667 -75.01689283012556 -4298.1513513514 4298.1513513514 -1 NULL +-119.4594594595 -119.46 -119 -120 -119 1.316485E-52 NULL NULL NULL NULL NULL NULL NULL NULL 
119.4594594595 -0.07885666683797002 NaN 0.9968859644388647 NaN -1.5624254815943668 -6844.522849943508 -2.0849608902209606 -119.4594594595 119.4594594595 -1 NULL +9318.4351351351 9318.44 9318 9318 9319 Infinity 9.13974998962673 3.969342986470191 13.185871984999437 NULL 13.185871984999437 173.867220004793 173.867220004793 96.53204201266593 9318.4351351351 0.4540668481851705 NaN 0.8909676185918236 NaN 1.5706890126394983 533907.0049096602 162.63737424163023 9318.4351351351 -9318.4351351351 1 -0.9607267417229353 +9318.4351351351 9318.44 9318 9318 9319 Infinity 9.13974998962673 3.969342986470191 13.185871984999437 NULL 13.185871984999437 173.867220004793 173.867220004793 96.53204201266593 9318.4351351351 0.4540668481851705 NaN 0.8909676185918236 NaN 1.5706890126394983 533907.0049096602 162.63737424163023 9318.4351351351 -9318.4351351351 1 -0.9607267417229353 +9318.4351351351 9318.44 9318 9318 9319 Infinity 9.13974998962673 3.969342986470191 13.185871984999437 NULL 13.185871984999437 173.867220004793 173.867220004793 96.53204201266593 9318.4351351351 0.4540668481851705 NaN 0.8909676185918236 NaN 1.5706890126394983 533907.0049096602 162.63737424163023 9318.4351351351 -9318.4351351351 1 -0.9607267417229353 +-4298.1513513514 -4298.15 -4298 -4299 -4298 0.0 NULL NULL NULL NULL NULL NULL NULL NULL 4298.1513513514 -0.43730633941118113 NaN 0.899312607223313 NaN -1.5705636686355597 -246265.93214088667 -75.01689283012556 -4298.1513513514 4298.1513513514 -1 NULL +-4298.1513513514 -4298.15 -4298 -4299 -4298 0.0 NULL NULL NULL NULL NULL NULL NULL NULL 4298.1513513514 -0.43730633941118113 NaN 0.899312607223313 NaN -1.5705636686355597 -246265.93214088667 -75.01689283012556 -4298.1513513514 4298.1513513514 -1 NULL +-4298.1513513514 -4298.15 -4298 -4299 -4298 0.0 NULL NULL NULL NULL NULL NULL NULL NULL 4298.1513513514 -0.43730633941118113 NaN 0.899312607223313 NaN -1.5705636686355597 -246265.93214088667 -75.01689283012556 -4298.1513513514 4298.1513513514 -1 NULL +-4298.1513513514 -4298.15 -4298 -4299 -4298 0.0 NULL NULL NULL NULL NULL NULL NULL NULL 4298.1513513514 -0.43730633941118113 NaN 0.899312607223313 NaN -1.5705636686355597 -246265.93214088667 -75.01689283012556 -4298.1513513514 4298.1513513514 -1 NULL +-4298.1513513514 -4298.15 -4298 -4299 -4298 0.0 NULL NULL NULL NULL NULL NULL NULL NULL 4298.1513513514 -0.43730633941118113 NaN 0.899312607223313 NaN -1.5705636686355597 -246265.93214088667 -75.01689283012556 -4298.1513513514 4298.1513513514 -1 NULL +-4298.1513513514 -4298.15 -4298 -4299 -4298 0.0 NULL NULL NULL NULL NULL NULL NULL NULL 4298.1513513514 -0.43730633941118113 NaN 0.899312607223313 NaN -1.5705636686355597 -246265.93214088667 -75.01689283012556 -4298.1513513514 4298.1513513514 -1 NULL +-4298.1513513514 -4298.15 -4298 -4299 -4298 0.0 NULL NULL NULL NULL NULL NULL NULL NULL 4298.1513513514 -0.43730633941118113 NaN 0.899312607223313 NaN -1.5705636686355597 -246265.93214088667 -75.01689283012556 -4298.1513513514 4298.1513513514 -1 NULL Index: ql/src/test/results/clientpositive/vector_decimal_precision.q.out =================================================================== --- ql/src/test/results/clientpositive/vector_decimal_precision.q.out (revision 0) +++ ql/src/test/results/clientpositive/vector_decimal_precision.q.out (working copy) @@ -0,0 +1,669 @@ +PREHOOK: query: DROP TABLE IF EXISTS DECIMAL_PRECISION_txt +PREHOOK: type: DROPTABLE +POSTHOOK: query: DROP TABLE IF EXISTS DECIMAL_PRECISION_txt +POSTHOOK: type: DROPTABLE +PREHOOK: query: DROP TABLE IF EXISTS DECIMAL_PRECISION +PREHOOK: type: 
DROPTABLE +POSTHOOK: query: DROP TABLE IF EXISTS DECIMAL_PRECISION +POSTHOOK: type: DROPTABLE +PREHOOK: query: CREATE TABLE DECIMAL_PRECISION_txt(dec decimal(20,10)) +ROW FORMAT DELIMITED + FIELDS TERMINATED BY ' ' +STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@DECIMAL_PRECISION_txt +POSTHOOK: query: CREATE TABLE DECIMAL_PRECISION_txt(dec decimal(20,10)) +ROW FORMAT DELIMITED + FIELDS TERMINATED BY ' ' +STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@DECIMAL_PRECISION_txt +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv8.txt' INTO TABLE DECIMAL_PRECISION_txt +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@decimal_precision_txt +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv8.txt' INTO TABLE DECIMAL_PRECISION_txt +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@decimal_precision_txt +PREHOOK: query: CREATE TABLE DECIMAL_PRECISION(dec decimal(20,10)) +STORED AS ORC +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@DECIMAL_PRECISION +POSTHOOK: query: CREATE TABLE DECIMAL_PRECISION(dec decimal(20,10)) +STORED AS ORC +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@DECIMAL_PRECISION +PREHOOK: query: INSERT OVERWRITE TABLE DECIMAL_PRECISION SELECT * FROM DECIMAL_PRECISION_txt +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_precision_txt +PREHOOK: Output: default@decimal_precision +POSTHOOK: query: INSERT OVERWRITE TABLE DECIMAL_PRECISION SELECT * FROM DECIMAL_PRECISION_txt +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_precision_txt +POSTHOOK: Output: default@decimal_precision +POSTHOOK: Lineage: decimal_precision.dec SIMPLE [(decimal_precision_txt)decimal_precision_txt.FieldSchema(name:dec, type:decimal(20,10), comment:null), ] +PREHOOK: query: SELECT * FROM DECIMAL_PRECISION ORDER BY dec +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_precision +#### A masked pattern was here #### +POSTHOOK: query: SELECT * FROM DECIMAL_PRECISION ORDER BY dec +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_precision +#### A masked pattern was here #### +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +0.0000000000 +0.0000000000 +0.0000000000 +0.0000000000 +0 +0.1234567890 +0.1234567890 +1.2345678901 +1.2345678901 +1.2345678901 +12.3456789012 +12.3456789012 +12.3456789012 +123.4567890123 +123.4567890123 +123.4567890123 +1234.5678901235 +1234.5678901235 +1234.5678901235 +12345.6789012346 +12345.6789012346 +123456.7890123456 +123456.7890123457 +1234567.890123456 +1234567.8901234568 +12345678.90123456 +12345678.9012345679 +123456789.0123456 +123456789.0123456789 +1234567890.123456 +1234567890.1234567890 +PREHOOK: query: SELECT dec, dec + 1, dec - 1 FROM DECIMAL_PRECISION ORDER BY dec +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_precision +#### A masked pattern was here #### +POSTHOOK: query: SELECT dec, dec + 1, dec - 1 FROM DECIMAL_PRECISION ORDER BY dec +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_precision +#### A masked pattern was here #### +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL 
NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +0.0000000000 1.0000000000 -1.0000000000 +0.0000000000 1.0000000000 -1.0000000000 +0.0000000000 1.0000000000 -1.0000000000 +0.0000000000 1.0000000000 -1.0000000000 +0 1 -1 +0.1234567890 1.1234567890 -0.8765432110 +0.1234567890 1.1234567890 -0.8765432110 +1.2345678901 2.2345678901 0.2345678901 +1.2345678901 2.2345678901 0.2345678901 +1.2345678901 2.2345678901 0.2345678901 +12.3456789012 13.3456789012 11.3456789012 +12.3456789012 13.3456789012 11.3456789012 +12.3456789012 13.3456789012 11.3456789012 +123.4567890123 124.4567890123 122.4567890123 +123.4567890123 124.4567890123 122.4567890123 +123.4567890123 124.4567890123 122.4567890123 +1234.5678901235 1235.5678901235 1233.5678901235 +1234.5678901235 1235.5678901235 1233.5678901235 +1234.5678901235 1235.5678901235 1233.5678901235 +12345.6789012346 12346.6789012346 12344.6789012346 +12345.6789012346 12346.6789012346 12344.6789012346 +123456.7890123456 123457.7890123456 123455.7890123456 +123456.7890123457 123457.7890123457 123455.7890123457 +1234567.890123456 1234568.890123456 1234566.890123456 +1234567.8901234568 1234568.8901234568 1234566.8901234568 +12345678.90123456 12345679.90123456 12345677.90123456 +12345678.9012345679 12345679.9012345679 12345677.9012345679 +123456789.0123456 123456790.0123456 123456788.0123456 +123456789.0123456789 123456790.0123456789 123456788.0123456789 +1234567890.123456 1234567891.123456 1234567889.123456 +1234567890.1234567890 1234567891.1234567890 1234567889.1234567890 +PREHOOK: query: SELECT dec, dec * 2, dec / 3 FROM DECIMAL_PRECISION ORDER BY dec +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_precision +#### A masked pattern was here #### +POSTHOOK: query: SELECT dec, dec * 2, dec / 3 FROM DECIMAL_PRECISION ORDER BY dec +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_precision +#### A masked pattern was here #### +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +NULL NULL NULL +0.0000000000 0.0000000000 0 +0.0000000000 0.0000000000 0 +0.0000000000 0.0000000000 0 +0.0000000000 0.0000000000 0 +0 0 0 +0.1234567890 0.2469135780 0.041152263 +0.1234567890 0.2469135780 0.041152263 +1.2345678901 2.4691357802 0.411522630033 +1.2345678901 2.4691357802 0.411522630033 +1.2345678901 2.4691357802 0.411522630033 +12.3456789012 24.6913578024 4.1152263004 +12.3456789012 24.6913578024 
4.1152263004 +12.3456789012 24.6913578024 4.1152263004 +123.4567890123 246.9135780246 41.1522630041 +123.4567890123 246.9135780246 41.1522630041 +123.4567890123 246.9135780246 41.1522630041 +1234.5678901235 2469.1357802470 411.522630041167 +1234.5678901235 2469.1357802470 411.522630041167 +1234.5678901235 2469.1357802470 411.522630041167 +12345.6789012346 24691.3578024692 4115.226300411533 +12345.6789012346 24691.3578024692 4115.226300411533 +123456.7890123456 246913.5780246912 41152.2630041152 +123456.7890123457 246913.5780246914 41152.263004115233 +1234567.890123456 2469135.780246912 411522.630041152 +1234567.8901234568 2469135.7802469136 411522.630041152267 +12345678.90123456 24691357.80246912 4115226.30041152 +12345678.9012345679 24691357.8024691358 4115226.300411522633 +123456789.0123456 246913578.0246912 41152263.0041152 +123456789.0123456789 246913578.0246913578 41152263.0041152263 +1234567890.123456 2469135780.246912 411522630.041152 +1234567890.1234567890 2469135780.2469135780 411522630.041152263 +PREHOOK: query: SELECT dec, dec / 9 FROM DECIMAL_PRECISION ORDER BY dec +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_precision +#### A masked pattern was here #### +POSTHOOK: query: SELECT dec, dec / 9 FROM DECIMAL_PRECISION ORDER BY dec +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_precision +#### A masked pattern was here #### +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +0.0000000000 0 +0.0000000000 0 +0.0000000000 0 +0.0000000000 0 +0 0 +0.1234567890 0.013717421 +0.1234567890 0.013717421 +1.2345678901 0.137174210011 +1.2345678901 0.137174210011 +1.2345678901 0.137174210011 +12.3456789012 1.371742100133 +12.3456789012 1.371742100133 +12.3456789012 1.371742100133 +123.4567890123 13.717421001367 +123.4567890123 13.717421001367 +123.4567890123 13.717421001367 +1234.5678901235 137.174210013722 +1234.5678901235 137.174210013722 +1234.5678901235 137.174210013722 +12345.6789012346 1371.742100137178 +12345.6789012346 1371.742100137178 +123456.7890123456 13717.421001371733 +123456.7890123457 13717.421001371744 +1234567.890123456 137174.210013717333 +1234567.8901234568 137174.210013717422 +12345678.90123456 1371742.100137173333 +12345678.9012345679 1371742.100137174211 +123456789.0123456 13717421.001371733333 +123456789.0123456789 13717421.0013717421 +1234567890.123456 137174210.013717333333 +1234567890.1234567890 137174210.013717421 +PREHOOK: query: SELECT dec, dec / 27 FROM DECIMAL_PRECISION ORDER BY dec +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_precision +#### A masked pattern was here #### +POSTHOOK: query: SELECT dec, dec / 27 FROM DECIMAL_PRECISION ORDER BY dec +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_precision +#### A masked pattern was here #### +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL 
NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +0.0000000000 0 +0.0000000000 0 +0.0000000000 0 +0.0000000000 0 +0 0 +0.1234567890 0.0045724736667 +0.1234567890 0.0045724736667 +1.2345678901 0.0457247366704 +1.2345678901 0.0457247366704 +1.2345678901 0.0457247366704 +12.3456789012 0.4572473667111 +12.3456789012 0.4572473667111 +12.3456789012 0.4572473667111 +123.4567890123 4.5724736671222 +123.4567890123 4.5724736671222 +123.4567890123 4.5724736671222 +1234.5678901235 45.7247366712407 +1234.5678901235 45.7247366712407 +1234.5678901235 45.7247366712407 +12345.6789012346 457.2473667123926 +12345.6789012346 457.2473667123926 +123456.7890123456 4572.4736671239111 +123456.7890123457 4572.4736671239148 +1234567.890123456 45724.7366712391111 +1234567.8901234568 45724.7366712391407 +12345678.90123456 457247.3667123911111 +12345678.9012345679 457247.3667123914037 +123456789.0123456 4572473.6671239111111 +123456789.0123456789 4572473.6671239140333 +1234567890.123456 45724736.6712391111111 +1234567890.1234567890 45724736.6712391403333 +PREHOOK: query: SELECT dec, dec * dec FROM DECIMAL_PRECISION ORDER BY dec +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_precision +#### A masked pattern was here #### +POSTHOOK: query: SELECT dec, dec * dec FROM DECIMAL_PRECISION ORDER BY dec +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_precision +#### A masked pattern was here #### +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +NULL NULL +0.0000000000 0.00000000000000000000 +0.0000000000 0.00000000000000000000 +0.0000000000 0.00000000000000000000 +0.0000000000 0.00000000000000000000 +0 0 +0.1234567890 0.01524157875019052100 +0.1234567890 0.01524157875019052100 +1.2345678901 1.52415787526596567801 +1.2345678901 1.52415787526596567801 +1.2345678901 1.52415787526596567801 +12.3456789012 152.41578753153483936144 +12.3456789012 152.41578753153483936144 +12.3456789012 152.41578753153483936144 +123.4567890123 15241.57875322755800955129 +123.4567890123 15241.57875322755800955129 +123.4567890123 15241.57875322755800955129 +1234.5678901235 1524157.87532399036884525225 +1234.5678901235 1524157.87532399036884525225 +1234.5678901235 1524157.87532399036884525225 +12345.6789012346 152415787.53238916034140423716 +12345.6789012346 152415787.53238916034140423716 +123456.7890123456 15241578753.23881726870921383936 +123456.7890123457 15241578753.23884196006701630849 +1234567.890123456 1524157875323.881726870921383936 +1234567.8901234568 1524157875323.88370217954558146624 +12345678.90123456 152415787532388.1726870921383936 +12345678.9012345679 152415787532388.36774881877789971041 +123456789.0123456 15241578753238817.26870921383936 +123456789.0123456789 15241578753238836.75019051998750190521 +1234567890.123456 NULL +1234567890.1234567890 NULL +PREHOOK: query: EXPLAIN SELECT avg(dec), sum(dec) FROM DECIMAL_PRECISION +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN SELECT avg(dec), sum(dec) FROM DECIMAL_PRECISION +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + 
Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: decimal_precision + Statistics: Num rows: 75 Data size: 3472 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: dec (type: decimal(20,10)) + outputColumnNames: dec + Statistics: Num rows: 75 Data size: 3472 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: avg(dec), sum(dec) + mode: hash + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: struct<count:bigint,sum:decimal(30,10),input:decimal(20,10)>), _col1 (type: decimal(30,10)) + Execution mode: vectorized + Reduce Operator Tree: + Group By Operator + aggregations: avg(VALUE._col0), sum(VALUE._col1) + mode: mergepartial + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1 Data size: 224 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: decimal(24,14)), _col1 (type: decimal(30,10)) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1 Data size: 224 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 224 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT avg(dec), sum(dec) FROM DECIMAL_PRECISION +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_precision +#### A masked pattern was here #### +POSTHOOK: query: SELECT avg(dec), sum(dec) FROM DECIMAL_PRECISION +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_precision +#### A masked pattern was here #### +88499534.57586576220645 2743485571.8518386284 +PREHOOK: query: SELECT dec * cast('12345678901234567890.12345678' as decimal(38,18)) FROM DECIMAL_PRECISION LIMIT 1 +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_precision +#### A masked pattern was here #### +POSTHOOK: query: SELECT dec * cast('12345678901234567890.12345678' as decimal(38,18)) FROM DECIMAL_PRECISION LIMIT 1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_precision +#### A masked pattern was here #### +NULL +PREHOOK: query: SELECT * from DECIMAL_PRECISION WHERE dec > cast('1234567890123456789012345678.12345678' as decimal(38,18)) LIMIT 1 +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_precision +#### A masked pattern was here #### +POSTHOOK: query: SELECT * from DECIMAL_PRECISION WHERE dec > cast('1234567890123456789012345678.12345678' as decimal(38,18)) LIMIT 1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_precision +#### A masked pattern was here #### +PREHOOK: query: SELECT dec * 12345678901234567890.12345678 FROM DECIMAL_PRECISION LIMIT 1 +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_precision +#### A masked pattern was here #### +POSTHOOK: query: SELECT dec * 12345678901234567890.12345678 FROM DECIMAL_PRECISION LIMIT 1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_precision +#### A masked pattern was here #### +NULL +PREHOOK: query: SELECT MIN(cast('12345678901234567890.12345678' as decimal(38,18))) FROM DECIMAL_PRECISION +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_precision +#### A masked pattern was here #### +POSTHOOK: query: SELECT
MIN(cast('12345678901234567890.12345678' as decimal(38,18))) FROM DECIMAL_PRECISION +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_precision +#### A masked pattern was here #### +12345678901234567890.12345678 +PREHOOK: query: SELECT COUNT(cast('12345678901234567890.12345678' as decimal(38,18))) FROM DECIMAL_PRECISION +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_precision +#### A masked pattern was here #### +POSTHOOK: query: SELECT COUNT(cast('12345678901234567890.12345678' as decimal(38,18))) FROM DECIMAL_PRECISION +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_precision +#### A masked pattern was here #### +75 +PREHOOK: query: DROP TABLE DECIMAL_PRECISION_txt +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@decimal_precision_txt +PREHOOK: Output: default@decimal_precision_txt +POSTHOOK: query: DROP TABLE DECIMAL_PRECISION_txt +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@decimal_precision_txt +POSTHOOK: Output: default@decimal_precision_txt +PREHOOK: query: DROP TABLE DECIMAL_PRECISION +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@decimal_precision +PREHOOK: Output: default@decimal_precision +POSTHOOK: query: DROP TABLE DECIMAL_PRECISION +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@decimal_precision +POSTHOOK: Output: default@decimal_precision Index: ql/src/test/results/clientpositive/vector_decimal_round.q.out =================================================================== --- ql/src/test/results/clientpositive/vector_decimal_round.q.out (revision 0) +++ ql/src/test/results/clientpositive/vector_decimal_round.q.out (working copy) @@ -0,0 +1,414 @@ +PREHOOK: query: create table decimal_tbl_txt (dec decimal(10,0)) +ROW FORMAT DELIMITED + FIELDS TERMINATED BY ' ' +STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@decimal_tbl_txt +POSTHOOK: query: create table decimal_tbl_txt (dec decimal(10,0)) +ROW FORMAT DELIMITED + FIELDS TERMINATED BY ' ' +STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@decimal_tbl_txt +PREHOOK: query: insert into table decimal_tbl_txt values(101) +PREHOOK: type: QUERY +PREHOOK: Input: default@values__tmp__table__1 +PREHOOK: Output: default@decimal_tbl_txt +POSTHOOK: query: insert into table decimal_tbl_txt values(101) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@values__tmp__table__1 +POSTHOOK: Output: default@decimal_tbl_txt +POSTHOOK: Lineage: decimal_tbl_txt.dec EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ] +PREHOOK: query: select * from decimal_tbl_txt +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_tbl_txt +#### A masked pattern was here #### +POSTHOOK: query: select * from decimal_tbl_txt +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_tbl_txt +#### A masked pattern was here #### +101 +PREHOOK: query: explain +select dec, round(dec, -1) from decimal_tbl_txt order by dec +PREHOOK: type: QUERY +POSTHOOK: query: explain +select dec, round(dec, -1) from decimal_tbl_txt order by dec +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: decimal_tbl_txt + Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: dec (type: decimal(10,0)), round(dec, -1) (type: decimal(11,0)) + outputColumnNames: _col0, _col1 + 
Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: decimal(10,0)) + sort order: + + Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE + value expressions: _col1 (type: decimal(11,0)) + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: decimal(10,0)), VALUE._col0 (type: decimal(11,0)) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select dec, round(dec, -1) from decimal_tbl_txt order by dec +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_tbl_txt +#### A masked pattern was here #### +POSTHOOK: query: select dec, round(dec, -1) from decimal_tbl_txt order by dec +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_tbl_txt +#### A masked pattern was here #### +101 100 +PREHOOK: query: explain +select dec, round(dec, -1) from decimal_tbl_txt order by round(dec, -1) +PREHOOK: type: QUERY +POSTHOOK: query: explain +select dec, round(dec, -1) from decimal_tbl_txt order by round(dec, -1) +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: decimal_tbl_txt + Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: dec (type: decimal(10,0)) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: round(_col0, (- 1)) (type: decimal(11,0)) + sort order: + + Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: decimal(10,0)) + Reduce Operator Tree: + Select Operator + expressions: VALUE._col0 (type: decimal(10,0)), KEY.reducesinkkey0 (type: decimal(11,0)) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select dec, round(dec, -1) from decimal_tbl_txt order by round(dec, -1) +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_tbl_txt +#### A masked pattern was here #### +POSTHOOK: query: select dec, round(dec, -1) from decimal_tbl_txt order by round(dec, -1) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_tbl_txt +#### A masked pattern was here #### +101 100 +PREHOOK: query: create table decimal_tbl_rc (dec decimal(10,0)) +row format serde 'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe' stored as rcfile +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@decimal_tbl_rc +POSTHOOK: query: 
create table decimal_tbl_rc (dec decimal(10,0)) +row format serde 'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe' stored as rcfile +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@decimal_tbl_rc +PREHOOK: query: insert into table decimal_tbl_rc values(101) +PREHOOK: type: QUERY +PREHOOK: Input: default@values__tmp__table__2 +PREHOOK: Output: default@decimal_tbl_rc +POSTHOOK: query: insert into table decimal_tbl_rc values(101) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@values__tmp__table__2 +POSTHOOK: Output: default@decimal_tbl_rc +POSTHOOK: Lineage: decimal_tbl_rc.dec EXPRESSION [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col1, type:string, comment:), ] +PREHOOK: query: select * from decimal_tbl_rc +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_tbl_rc +#### A masked pattern was here #### +POSTHOOK: query: select * from decimal_tbl_rc +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_tbl_rc +#### A masked pattern was here #### +101 +PREHOOK: query: explain +select dec, round(dec, -1) from decimal_tbl_rc order by dec +PREHOOK: type: QUERY +POSTHOOK: query: explain +select dec, round(dec, -1) from decimal_tbl_rc order by dec +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: decimal_tbl_rc + Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: dec (type: decimal(10,0)), round(dec, -1) (type: decimal(11,0)) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: decimal(10,0)) + sort order: + + Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE + value expressions: _col1 (type: decimal(11,0)) + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: decimal(10,0)), VALUE._col0 (type: decimal(11,0)) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select dec, round(dec, -1) from decimal_tbl_rc order by dec +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_tbl_rc +#### A masked pattern was here #### +POSTHOOK: query: select dec, round(dec, -1) from decimal_tbl_rc order by dec +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_tbl_rc +#### A masked pattern was here #### +101 100 +PREHOOK: query: explain +select dec, round(dec, -1) from decimal_tbl_rc order by round(dec, -1) +PREHOOK: type: QUERY +POSTHOOK: query: explain +select dec, round(dec, -1) from decimal_tbl_rc order by round(dec, -1) +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: decimal_tbl_rc + Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: dec (type: decimal(10,0)) + 
outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: round(_col0, (- 1)) (type: decimal(11,0)) + sort order: + + Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: decimal(10,0)) + Reduce Operator Tree: + Select Operator + expressions: VALUE._col0 (type: decimal(10,0)), KEY.reducesinkkey0 (type: decimal(11,0)) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select dec, round(dec, -1) from decimal_tbl_rc order by round(dec, -1) +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_tbl_rc +#### A masked pattern was here #### +POSTHOOK: query: select dec, round(dec, -1) from decimal_tbl_rc order by round(dec, -1) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_tbl_rc +#### A masked pattern was here #### +101 100 +PREHOOK: query: create table decimal_tbl_orc (dec decimal(10,0)) +stored as orc +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@decimal_tbl_orc +POSTHOOK: query: create table decimal_tbl_orc (dec decimal(10,0)) +stored as orc +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@decimal_tbl_orc +PREHOOK: query: insert into table decimal_tbl_orc values(101) +PREHOOK: type: QUERY +PREHOOK: Input: default@values__tmp__table__3 +PREHOOK: Output: default@decimal_tbl_orc +POSTHOOK: query: insert into table decimal_tbl_orc values(101) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@values__tmp__table__3 +POSTHOOK: Output: default@decimal_tbl_orc +POSTHOOK: Lineage: decimal_tbl_orc.dec EXPRESSION [(values__tmp__table__3)values__tmp__table__3.FieldSchema(name:tmp_values_col1, type:string, comment:), ] +PREHOOK: query: select * from decimal_tbl_orc +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_tbl_orc +#### A masked pattern was here #### +POSTHOOK: query: select * from decimal_tbl_orc +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_tbl_orc +#### A masked pattern was here #### +101 +PREHOOK: query: explain +select dec, round(dec, -1) from decimal_tbl_orc order by dec +PREHOOK: type: QUERY +POSTHOOK: query: explain +select dec, round(dec, -1) from decimal_tbl_orc order by dec +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: decimal_tbl_orc + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: dec (type: decimal(10,0)), round(dec, -1) (type: decimal(11,0)) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: decimal(10,0)) + sort order: + + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + value expressions: _col1 (type: decimal(11,0)) + Execution mode: vectorized + Reduce Operator Tree: + Select Operator + 
expressions: KEY.reducesinkkey0 (type: decimal(10,0)), VALUE._col0 (type: decimal(11,0)) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select dec, round(dec, -1) from decimal_tbl_orc order by dec +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_tbl_orc +#### A masked pattern was here #### +POSTHOOK: query: select dec, round(dec, -1) from decimal_tbl_orc order by dec +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_tbl_orc +#### A masked pattern was here #### +101 100 +PREHOOK: query: explain +select dec, round(dec, -1) from decimal_tbl_orc order by round(dec, -1) +PREHOOK: type: QUERY +POSTHOOK: query: explain +select dec, round(dec, -1) from decimal_tbl_orc order by round(dec, -1) +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: decimal_tbl_orc + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: dec (type: decimal(10,0)) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: round(_col0, (- 1)) (type: decimal(11,0)) + sort order: + + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: decimal(10,0)) + Execution mode: vectorized + Reduce Operator Tree: + Select Operator + expressions: VALUE._col0 (type: decimal(10,0)), KEY.reducesinkkey0 (type: decimal(11,0)) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select dec, round(dec, -1) from decimal_tbl_orc order by round(dec, -1) +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_tbl_orc +#### A masked pattern was here #### +POSTHOOK: query: select dec, round(dec, -1) from decimal_tbl_orc order by round(dec, -1) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_tbl_orc +#### A masked pattern was here #### +101 100 Index: ql/src/test/results/clientpositive/vector_decimal_round_2.q.out =================================================================== --- ql/src/test/results/clientpositive/vector_decimal_round_2.q.out (revision 0) +++ ql/src/test/results/clientpositive/vector_decimal_round_2.q.out (working copy) @@ -0,0 +1,471 @@ +PREHOOK: query: create table decimal_tbl_1_orc (dec decimal(38,18)) +STORED AS ORC +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@decimal_tbl_1_orc +POSTHOOK: query: create table decimal_tbl_1_orc (dec decimal(38,18)) 
+STORED AS ORC +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@decimal_tbl_1_orc +PREHOOK: query: insert into table decimal_tbl_1_orc values(55555) +PREHOOK: type: QUERY +PREHOOK: Input: default@values__tmp__table__1 +PREHOOK: Output: default@decimal_tbl_1_orc +POSTHOOK: query: insert into table decimal_tbl_1_orc values(55555) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@values__tmp__table__1 +POSTHOOK: Output: default@decimal_tbl_1_orc +POSTHOOK: Lineage: decimal_tbl_1_orc.dec EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ] +PREHOOK: query: select * from decimal_tbl_1_orc +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_tbl_1_orc +#### A masked pattern was here #### +POSTHOOK: query: select * from decimal_tbl_1_orc +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_tbl_1_orc +#### A masked pattern was here #### +55555 +PREHOOK: query: -- EXPLAIN +-- SELECT dec, round(null), round(null, 0), round(125, null), +-- round(1.0/0.0, 0), round(power(-1.0,0.5), 0) +-- FROM decimal_tbl_1_orc ORDER BY dec; + +-- SELECT dec, round(null), round(null, 0), round(125, null), +-- round(1.0/0.0, 0), round(power(-1.0,0.5), 0) +-- FROM decimal_tbl_1_orc ORDER BY dec; + +EXPLAIN +SELECT + round(dec) as d, round(dec, 0), round(dec, 1), round(dec, 2), round(dec, 3), + round(dec, -1), round(dec, -2), round(dec, -3), round(dec, -4), + round(dec, -5), round(dec, -6), round(dec, -7), round(dec, -8) +FROM decimal_tbl_1_orc ORDER BY d +PREHOOK: type: QUERY +POSTHOOK: query: -- EXPLAIN +-- SELECT dec, round(null), round(null, 0), round(125, null), +-- round(1.0/0.0, 0), round(power(-1.0,0.5), 0) +-- FROM decimal_tbl_1_orc ORDER BY dec; + +-- SELECT dec, round(null), round(null, 0), round(125, null), +-- round(1.0/0.0, 0), round(power(-1.0,0.5), 0) +-- FROM decimal_tbl_1_orc ORDER BY dec; + +EXPLAIN +SELECT + round(dec) as d, round(dec, 0), round(dec, 1), round(dec, 2), round(dec, 3), + round(dec, -1), round(dec, -2), round(dec, -3), round(dec, -4), + round(dec, -5), round(dec, -6), round(dec, -7), round(dec, -8) +FROM decimal_tbl_1_orc ORDER BY d +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: decimal_tbl_1_orc + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: round(dec) (type: decimal(21,0)), round(dec, 0) (type: decimal(21,0)), round(dec, 1) (type: decimal(22,1)), round(dec, 2) (type: decimal(23,2)), round(dec, 3) (type: decimal(24,3)), round(dec, -1) (type: decimal(21,0)), round(dec, -2) (type: decimal(21,0)), round(dec, -3) (type: decimal(21,0)), round(dec, -4) (type: decimal(21,0)), round(dec, -5) (type: decimal(21,0)), round(dec, -6) (type: decimal(21,0)), round(dec, -7) (type: decimal(21,0)), round(dec, -8) (type: decimal(21,0)) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: decimal(21,0)) + sort order: + + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + value expressions: _col1 (type: decimal(21,0)), _col2 (type: decimal(22,1)), _col3 (type: decimal(23,2)), _col4 (type: decimal(24,3)), _col5 (type: decimal(21,0)), _col6 (type: 
decimal(21,0)), _col7 (type: decimal(21,0)), _col8 (type: decimal(21,0)), _col9 (type: decimal(21,0)), _col10 (type: decimal(21,0)), _col11 (type: decimal(21,0)), _col12 (type: decimal(21,0)) + Execution mode: vectorized + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: decimal(21,0)), VALUE._col0 (type: decimal(21,0)), VALUE._col1 (type: decimal(22,1)), VALUE._col2 (type: decimal(23,2)), VALUE._col3 (type: decimal(24,3)), VALUE._col4 (type: decimal(21,0)), VALUE._col5 (type: decimal(21,0)), VALUE._col6 (type: decimal(21,0)), VALUE._col7 (type: decimal(21,0)), VALUE._col8 (type: decimal(21,0)), VALUE._col9 (type: decimal(21,0)), VALUE._col10 (type: decimal(21,0)), VALUE._col11 (type: decimal(21,0)) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT + round(dec) as d, round(dec, 0), round(dec, 1), round(dec, 2), round(dec, 3), + round(dec, -1), round(dec, -2), round(dec, -3), round(dec, -4), + round(dec, -5), round(dec, -6), round(dec, -7), round(dec, -8) +FROM decimal_tbl_1_orc ORDER BY d +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_tbl_1_orc +#### A masked pattern was here #### +POSTHOOK: query: SELECT + round(dec) as d, round(dec, 0), round(dec, 1), round(dec, 2), round(dec, 3), + round(dec, -1), round(dec, -2), round(dec, -3), round(dec, -4), + round(dec, -5), round(dec, -6), round(dec, -7), round(dec, -8) +FROM decimal_tbl_1_orc ORDER BY d +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_tbl_1_orc +#### A masked pattern was here #### +55555 55555 55555.0 55555.00 55555.000 55560 55600 56000 60000 100000 0 0 0 +PREHOOK: query: create table decimal_tbl_2_orc (pos decimal(38,18), neg decimal(38,18)) +STORED AS ORC +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@decimal_tbl_2_orc +POSTHOOK: query: create table decimal_tbl_2_orc (pos decimal(38,18), neg decimal(38,18)) +STORED AS ORC +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@decimal_tbl_2_orc +PREHOOK: query: insert into table decimal_tbl_2_orc values(125.315, -125.315) +PREHOOK: type: QUERY +PREHOOK: Input: default@values__tmp__table__2 +PREHOOK: Output: default@decimal_tbl_2_orc +POSTHOOK: query: insert into table decimal_tbl_2_orc values(125.315, -125.315) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@values__tmp__table__2 +POSTHOOK: Output: default@decimal_tbl_2_orc +POSTHOOK: Lineage: decimal_tbl_2_orc.neg EXPRESSION [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col2, type:string, comment:), ] +POSTHOOK: Lineage: decimal_tbl_2_orc.pos EXPRESSION [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col1, type:string, comment:), ] +PREHOOK: query: select * from decimal_tbl_2_orc +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_tbl_2_orc +#### A masked pattern was here #### +POSTHOOK: query: select * from decimal_tbl_2_orc +POSTHOOK: type: QUERY +POSTHOOK: Input: 
default@decimal_tbl_2_orc +#### A masked pattern was here #### +125.315 -125.315 +PREHOOK: query: EXPLAIN +SELECT + round(pos) as p, round(pos, 0), + round(pos, 1), round(pos, 2), round(pos, 3), round(pos, 4), + round(pos, -1), round(pos, -2), round(pos, -3), round(pos, -4), + round(neg), round(neg, 0), + round(neg, 1), round(neg, 2), round(neg, 3), round(neg, 4), + round(neg, -1), round(neg, -2), round(neg, -3), round(neg, -4) +FROM decimal_tbl_2_orc ORDER BY p +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN +SELECT + round(pos) as p, round(pos, 0), + round(pos, 1), round(pos, 2), round(pos, 3), round(pos, 4), + round(pos, -1), round(pos, -2), round(pos, -3), round(pos, -4), + round(neg), round(neg, 0), + round(neg, 1), round(neg, 2), round(neg, 3), round(neg, 4), + round(neg, -1), round(neg, -2), round(neg, -3), round(neg, -4) +FROM decimal_tbl_2_orc ORDER BY p +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: decimal_tbl_2_orc + Statistics: Num rows: 1 Data size: 224 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: round(pos) (type: decimal(21,0)), round(pos, 0) (type: decimal(21,0)), round(pos, 1) (type: decimal(22,1)), round(pos, 2) (type: decimal(23,2)), round(pos, 3) (type: decimal(24,3)), round(pos, 4) (type: decimal(25,4)), round(pos, -1) (type: decimal(21,0)), round(pos, -2) (type: decimal(21,0)), round(pos, -3) (type: decimal(21,0)), round(pos, -4) (type: decimal(21,0)), round(neg) (type: decimal(21,0)), round(neg, 0) (type: decimal(21,0)), round(neg, 1) (type: decimal(22,1)), round(neg, 2) (type: decimal(23,2)), round(neg, 3) (type: decimal(24,3)), round(neg, 4) (type: decimal(25,4)), round(neg, -1) (type: decimal(21,0)), round(neg, -2) (type: decimal(21,0)), round(neg, -3) (type: decimal(21,0)), round(neg, -4) (type: decimal(21,0)) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19 + Statistics: Num rows: 1 Data size: 224 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: decimal(21,0)) + sort order: + + Statistics: Num rows: 1 Data size: 224 Basic stats: COMPLETE Column stats: NONE + value expressions: _col1 (type: decimal(21,0)), _col2 (type: decimal(22,1)), _col3 (type: decimal(23,2)), _col4 (type: decimal(24,3)), _col5 (type: decimal(25,4)), _col6 (type: decimal(21,0)), _col7 (type: decimal(21,0)), _col8 (type: decimal(21,0)), _col9 (type: decimal(21,0)), _col10 (type: decimal(21,0)), _col11 (type: decimal(21,0)), _col12 (type: decimal(22,1)), _col13 (type: decimal(23,2)), _col14 (type: decimal(24,3)), _col15 (type: decimal(25,4)), _col16 (type: decimal(21,0)), _col17 (type: decimal(21,0)), _col18 (type: decimal(21,0)), _col19 (type: decimal(21,0)) + Execution mode: vectorized + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: decimal(21,0)), VALUE._col0 (type: decimal(21,0)), VALUE._col1 (type: decimal(22,1)), VALUE._col2 (type: decimal(23,2)), VALUE._col3 (type: decimal(24,3)), VALUE._col4 (type: decimal(25,4)), VALUE._col5 (type: decimal(21,0)), VALUE._col6 (type: decimal(21,0)), VALUE._col7 (type: decimal(21,0)), VALUE._col8 (type: decimal(21,0)), VALUE._col9 (type: decimal(21,0)), VALUE._col10 (type: decimal(21,0)), VALUE._col11 (type: decimal(22,1)), VALUE._col12 (type: decimal(23,2)), VALUE._col13 (type: 
decimal(24,3)), VALUE._col14 (type: decimal(25,4)), VALUE._col15 (type: decimal(21,0)), VALUE._col16 (type: decimal(21,0)), VALUE._col17 (type: decimal(21,0)), VALUE._col18 (type: decimal(21,0)) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19 + Statistics: Num rows: 1 Data size: 224 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 224 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT + round(pos) as p, round(pos, 0), + round(pos, 1), round(pos, 2), round(pos, 3), round(pos, 4), + round(pos, -1), round(pos, -2), round(pos, -3), round(pos, -4), + round(neg), round(neg, 0), + round(neg, 1), round(neg, 2), round(neg, 3), round(neg, 4), + round(neg, -1), round(neg, -2), round(neg, -3), round(neg, -4) +FROM decimal_tbl_2_orc ORDER BY p +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_tbl_2_orc +#### A masked pattern was here #### +POSTHOOK: query: SELECT + round(pos) as p, round(pos, 0), + round(pos, 1), round(pos, 2), round(pos, 3), round(pos, 4), + round(pos, -1), round(pos, -2), round(pos, -3), round(pos, -4), + round(neg), round(neg, 0), + round(neg, 1), round(neg, 2), round(neg, 3), round(neg, 4), + round(neg, -1), round(neg, -2), round(neg, -3), round(neg, -4) +FROM decimal_tbl_2_orc ORDER BY p +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_tbl_2_orc +#### A masked pattern was here #### +125 125 125.3 125.32 125.315 125.3150 130 100 0 0 -125 -125 -125.3 -125.32 -125.315 -125.3150 -130 -100 0 0 +PREHOOK: query: create table decimal_tbl_3_orc (dec decimal(38,18)) +STORED AS ORC +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@decimal_tbl_3_orc +POSTHOOK: query: create table decimal_tbl_3_orc (dec decimal(38,18)) +STORED AS ORC +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@decimal_tbl_3_orc +PREHOOK: query: insert into table decimal_tbl_3_orc values(3.141592653589793) +PREHOOK: type: QUERY +PREHOOK: Input: default@values__tmp__table__3 +PREHOOK: Output: default@decimal_tbl_3_orc +POSTHOOK: query: insert into table decimal_tbl_3_orc values(3.141592653589793) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@values__tmp__table__3 +POSTHOOK: Output: default@decimal_tbl_3_orc +POSTHOOK: Lineage: decimal_tbl_3_orc.dec EXPRESSION [(values__tmp__table__3)values__tmp__table__3.FieldSchema(name:tmp_values_col1, type:string, comment:), ] +PREHOOK: query: select * from decimal_tbl_3_orc +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_tbl_3_orc +#### A masked pattern was here #### +POSTHOOK: query: select * from decimal_tbl_3_orc +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_tbl_3_orc +#### A masked pattern was here #### +3.141592653589793 +PREHOOK: query: EXPLAIN +SELECT + round(dec, -15) as d, round(dec, -16), + round(dec, -13), round(dec, -14), + round(dec, -11), round(dec, -12), + round(dec, -9), round(dec, -10), + round(dec, -7), round(dec, -8), + round(dec, -5), round(dec, -6), + round(dec, -3), round(dec, -4), + round(dec, -1), round(dec, -2), + round(dec, 0), round(dec, 1), + round(dec, 2), 
round(dec, 3), + round(dec, 4), round(dec, 5), + round(dec, 6), round(dec, 7), + round(dec, 8), round(dec, 9), + round(dec, 10), round(dec, 11), + round(dec, 12), round(dec, 13), + round(dec, 13), round(dec, 14), + round(dec, 15), round(dec, 16) +FROM decimal_tbl_3_orc ORDER BY d +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN +SELECT + round(dec, -15) as d, round(dec, -16), + round(dec, -13), round(dec, -14), + round(dec, -11), round(dec, -12), + round(dec, -9), round(dec, -10), + round(dec, -7), round(dec, -8), + round(dec, -5), round(dec, -6), + round(dec, -3), round(dec, -4), + round(dec, -1), round(dec, -2), + round(dec, 0), round(dec, 1), + round(dec, 2), round(dec, 3), + round(dec, 4), round(dec, 5), + round(dec, 6), round(dec, 7), + round(dec, 8), round(dec, 9), + round(dec, 10), round(dec, 11), + round(dec, 12), round(dec, 13), + round(dec, 13), round(dec, 14), + round(dec, 15), round(dec, 16) +FROM decimal_tbl_3_orc ORDER BY d +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: decimal_tbl_3_orc + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: round(dec, -15) (type: decimal(21,0)), round(dec, -16) (type: decimal(21,0)), round(dec, -5) (type: decimal(21,0)), round(dec, -6) (type: decimal(21,0)), round(dec, -3) (type: decimal(21,0)), round(dec, -4) (type: decimal(21,0)), round(dec, -1) (type: decimal(21,0)), round(dec, -2) (type: decimal(21,0)), round(dec, 0) (type: decimal(21,0)), round(dec, 1) (type: decimal(22,1)), round(dec, 2) (type: decimal(23,2)), round(dec, 3) (type: decimal(24,3)), round(dec, -13) (type: decimal(21,0)), round(dec, 4) (type: decimal(25,4)), round(dec, 5) (type: decimal(26,5)), round(dec, 6) (type: decimal(27,6)), round(dec, 7) (type: decimal(28,7)), round(dec, 8) (type: decimal(29,8)), round(dec, 9) (type: decimal(30,9)), round(dec, 10) (type: decimal(31,10)), round(dec, 11) (type: decimal(32,11)), round(dec, 12) (type: decimal(33,12)), round(dec, 13) (type: decimal(34,13)), round(dec, -14) (type: decimal(21,0)), round(dec, 14) (type: decimal(35,14)), round(dec, 15) (type: decimal(36,15)), round(dec, 16) (type: decimal(37,16)), round(dec, -11) (type: decimal(21,0)), round(dec, -12) (type: decimal(21,0)), round(dec, -9) (type: decimal(21,0)), round(dec, -10) (type: decimal(21,0)), round(dec, -7) (type: decimal(21,0)), round(dec, -8) (type: decimal(21,0)) + outputColumnNames: _col0, _col1, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col2, _col20, _col21, _col22, _col23, _col24, _col25, _col26, _col27, _col28, _col29, _col3, _col31, _col32, _col33, _col4, _col5, _col6, _col7, _col8, _col9 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: decimal(21,0)) + sort order: + + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + value expressions: _col1 (type: decimal(21,0)), _col2 (type: decimal(21,0)), _col3 (type: decimal(21,0)), _col4 (type: decimal(21,0)), _col5 (type: decimal(21,0)), _col6 (type: decimal(21,0)), _col7 (type: decimal(21,0)), _col8 (type: decimal(21,0)), _col9 (type: decimal(21,0)), _col10 (type: decimal(21,0)), _col11 (type: decimal(21,0)), _col12 (type: decimal(21,0)), _col13 (type: decimal(21,0)), _col14 (type: decimal(21,0)), _col15 (type: decimal(21,0)), _col16 (type: decimal(21,0)), 
_col17 (type: decimal(22,1)), _col18 (type: decimal(23,2)), _col19 (type: decimal(24,3)), _col20 (type: decimal(25,4)), _col21 (type: decimal(26,5)), _col22 (type: decimal(27,6)), _col23 (type: decimal(28,7)), _col24 (type: decimal(29,8)), _col25 (type: decimal(30,9)), _col26 (type: decimal(31,10)), _col27 (type: decimal(32,11)), _col28 (type: decimal(33,12)), _col29 (type: decimal(34,13)), _col31 (type: decimal(35,14)), _col32 (type: decimal(36,15)), _col33 (type: decimal(37,16)) + Execution mode: vectorized + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: decimal(21,0)), VALUE._col0 (type: decimal(21,0)), VALUE._col1 (type: decimal(21,0)), VALUE._col2 (type: decimal(21,0)), VALUE._col3 (type: decimal(21,0)), VALUE._col4 (type: decimal(21,0)), VALUE._col5 (type: decimal(21,0)), VALUE._col6 (type: decimal(21,0)), VALUE._col7 (type: decimal(21,0)), VALUE._col8 (type: decimal(21,0)), VALUE._col9 (type: decimal(21,0)), VALUE._col10 (type: decimal(21,0)), VALUE._col11 (type: decimal(21,0)), VALUE._col12 (type: decimal(21,0)), VALUE._col13 (type: decimal(21,0)), VALUE._col14 (type: decimal(21,0)), VALUE._col15 (type: decimal(21,0)), VALUE._col16 (type: decimal(22,1)), VALUE._col17 (type: decimal(23,2)), VALUE._col18 (type: decimal(24,3)), VALUE._col19 (type: decimal(25,4)), VALUE._col20 (type: decimal(26,5)), VALUE._col21 (type: decimal(27,6)), VALUE._col22 (type: decimal(28,7)), VALUE._col23 (type: decimal(29,8)), VALUE._col24 (type: decimal(30,9)), VALUE._col25 (type: decimal(31,10)), VALUE._col26 (type: decimal(32,11)), VALUE._col27 (type: decimal(33,12)), VALUE._col28 (type: decimal(34,13)), VALUE._col28 (type: decimal(34,13)), VALUE._col29 (type: decimal(35,14)), VALUE._col30 (type: decimal(36,15)), VALUE._col31 (type: decimal(37,16)) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32, _col33 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT + round(dec, -15) as d, round(dec, -16), + round(dec, -13), round(dec, -14), + round(dec, -11), round(dec, -12), + round(dec, -9), round(dec, -10), + round(dec, -7), round(dec, -8), + round(dec, -5), round(dec, -6), + round(dec, -3), round(dec, -4), + round(dec, -1), round(dec, -2), + round(dec, 0), round(dec, 1), + round(dec, 2), round(dec, 3), + round(dec, 4), round(dec, 5), + round(dec, 6), round(dec, 7), + round(dec, 8), round(dec, 9), + round(dec, 10), round(dec, 11), + round(dec, 12), round(dec, 13), + round(dec, 13), round(dec, 14), + round(dec, 15), round(dec, 16) +FROM decimal_tbl_3_orc ORDER BY d +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_tbl_3_orc +#### A masked pattern was here #### +POSTHOOK: query: SELECT + round(dec, -15) as d, round(dec, -16), + round(dec, -13), round(dec, -14), + round(dec, -11), round(dec, -12), + round(dec, -9), round(dec, -10), + round(dec, -7), round(dec, -8), + round(dec, -5), round(dec, -6), + 
round(dec, -3), round(dec, -4), + round(dec, -1), round(dec, -2), + round(dec, 0), round(dec, 1), + round(dec, 2), round(dec, 3), + round(dec, 4), round(dec, 5), + round(dec, 6), round(dec, 7), + round(dec, 8), round(dec, 9), + round(dec, 10), round(dec, 11), + round(dec, 12), round(dec, 13), + round(dec, 13), round(dec, 14), + round(dec, 15), round(dec, 16) +FROM decimal_tbl_3_orc ORDER BY d +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_tbl_3_orc +#### A masked pattern was here #### +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 3 3.1 3.14 3.142 3.1416 3.14159 3.141593 3.1415927 3.14159265 3.141592654 3.1415926536 3.14159265359 3.141592653590 3.1415926535898 3.1415926535898 3.14159265358979 3.141592653589793 3.1415926535897930 +PREHOOK: query: create table decimal_tbl_4_orc (pos decimal(38,18), neg decimal(38,18)) +STORED AS ORC +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@decimal_tbl_4_orc +POSTHOOK: query: create table decimal_tbl_4_orc (pos decimal(38,18), neg decimal(38,18)) +STORED AS ORC +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@decimal_tbl_4_orc +PREHOOK: query: insert into table decimal_tbl_4_orc values(1809242.3151111344, -1809242.3151111344) +PREHOOK: type: QUERY +PREHOOK: Input: default@values__tmp__table__4 +PREHOOK: Output: default@decimal_tbl_4_orc +POSTHOOK: query: insert into table decimal_tbl_4_orc values(1809242.3151111344, -1809242.3151111344) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@values__tmp__table__4 +POSTHOOK: Output: default@decimal_tbl_4_orc +POSTHOOK: Lineage: decimal_tbl_4_orc.neg EXPRESSION [(values__tmp__table__4)values__tmp__table__4.FieldSchema(name:tmp_values_col2, type:string, comment:), ] +POSTHOOK: Lineage: decimal_tbl_4_orc.pos EXPRESSION [(values__tmp__table__4)values__tmp__table__4.FieldSchema(name:tmp_values_col1, type:string, comment:), ] +PREHOOK: query: select * from decimal_tbl_4_orc +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_tbl_4_orc +#### A masked pattern was here #### +POSTHOOK: query: select * from decimal_tbl_4_orc +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_tbl_4_orc +#### A masked pattern was here #### +1809242.3151111344 -1809242.3151111344 +PREHOOK: query: EXPLAIN +SELECT round(pos, 9) as p, round(neg, 9), round(1809242.3151111344BD, 9), round(-1809242.3151111344BD, 9) +FROM decimal_tbl_4_orc ORDER BY p +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN +SELECT round(pos, 9) as p, round(neg, 9), round(1809242.3151111344BD, 9), round(-1809242.3151111344BD, 9) +FROM decimal_tbl_4_orc ORDER BY p +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: decimal_tbl_4_orc + Statistics: Num rows: 1 Data size: 224 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: round(pos, 9) (type: decimal(30,9)), round(neg, 9) (type: decimal(30,9)), round(1809242.3151111344, 9) (type: decimal(17,9)), round((- 1809242.3151111344), 9) (type: decimal(17,9)) + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 1 Data size: 224 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: decimal(30,9)) + sort order: + + Statistics: Num rows: 1 Data size: 224 Basic stats: COMPLETE Column stats: NONE + value expressions: _col1 (type: decimal(30,9)), _col2 (type: decimal(17,9)), _col3 (type: decimal(17,9)) + Reduce Operator Tree: + 
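The plan types above follow a simple rule for ROUND over decimals: rounding decimal(p,s) to scale n keeps the p - s integer digits, adds one digit of headroom for a possible carry, and uses max(n, 0) as the result scale, so decimal(38,18) becomes decimal(30,9) at scale 9 and decimal(21,0) at every negative scale, while the decimal(17,10) literal becomes decimal(17,9). A minimal sketch of that rule, and of the HALF_UP rounding visible in the result rows, in plain java.math.BigDecimal (roundResultType is an illustrative helper, not Hive's API):

import java.math.BigDecimal;
import java.math.RoundingMode;

public class RoundTypeSketch {
  // ROUND(decimal(p,s), n): keep the p - s integer digits, add 1 for a
  // possible carry, use max(n, 0) as the scale; cap precision at 38.
  static String roundResultType(int p, int s, int n) {
    int scale = Math.max(n, 0);
    int precision = Math.min(38, p - s + 1 + scale);
    return "decimal(" + precision + "," + scale + ")";
  }

  public static void main(String[] args) {
    System.out.println(roundResultType(38, 18, 9));   // decimal(30,9), as in the plan
    System.out.println(roundResultType(38, 18, -15)); // decimal(21,0)
    // The value itself rounds HALF_UP, matching the 1809242.315111134 rows:
    BigDecimal pos = new BigDecimal("1809242.3151111344");
    System.out.println(pos.setScale(9, RoundingMode.HALF_UP)); // 1809242.315111134
  }
}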
Select Operator + expressions: KEY.reducesinkkey0 (type: decimal(30,9)), VALUE._col0 (type: decimal(30,9)), VALUE._col1 (type: decimal(17,9)), VALUE._col2 (type: decimal(17,9)) + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 1 Data size: 224 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 224 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT round(pos, 9) as p, round(neg, 9), round(1809242.3151111344BD, 9), round(-1809242.3151111344BD, 9) +FROM decimal_tbl_4_orc ORDER BY p +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_tbl_4_orc +#### A masked pattern was here #### +POSTHOOK: query: SELECT round(pos, 9) as p, round(neg, 9), round(1809242.3151111344BD, 9), round(-1809242.3151111344BD, 9) +FROM decimal_tbl_4_orc ORDER BY p +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_tbl_4_orc +#### A masked pattern was here #### +1809242.315111134 -1809242.315111134 1809242.315111134 -1809242.315111134 Index: ql/src/test/results/clientpositive/vector_decimal_trailing.q.out =================================================================== --- ql/src/test/results/clientpositive/vector_decimal_trailing.q.out (revision 0) +++ ql/src/test/results/clientpositive/vector_decimal_trailing.q.out (working copy) @@ -0,0 +1,121 @@ +PREHOOK: query: DROP TABLE IF EXISTS DECIMAL_TRAILING_txt +PREHOOK: type: DROPTABLE +POSTHOOK: query: DROP TABLE IF EXISTS DECIMAL_TRAILING_txt +POSTHOOK: type: DROPTABLE +PREHOOK: query: DROP TABLE IF EXISTS DECIMAL_TRAILING +PREHOOK: type: DROPTABLE +POSTHOOK: query: DROP TABLE IF EXISTS DECIMAL_TRAILING +POSTHOOK: type: DROPTABLE +PREHOOK: query: CREATE TABLE DECIMAL_TRAILING_txt ( + id int, + a decimal(10,4), + b decimal(15,8) + ) +ROW FORMAT DELIMITED + FIELDS TERMINATED BY ',' +STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@DECIMAL_TRAILING_txt +POSTHOOK: query: CREATE TABLE DECIMAL_TRAILING_txt ( + id int, + a decimal(10,4), + b decimal(15,8) + ) +ROW FORMAT DELIMITED + FIELDS TERMINATED BY ',' +STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@DECIMAL_TRAILING_txt +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv10.txt' INTO TABLE DECIMAL_TRAILING_txt +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@decimal_trailing_txt +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv10.txt' INTO TABLE DECIMAL_TRAILING_txt +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@decimal_trailing_txt +PREHOOK: query: CREATE TABLE DECIMAL_TRAILING ( + id int, + a decimal(10,4), + b decimal(15,8) + ) +STORED AS ORC +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@DECIMAL_TRAILING +POSTHOOK: query: CREATE TABLE DECIMAL_TRAILING ( + id int, + a decimal(10,4), + b decimal(15,8) + ) +STORED AS ORC +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@DECIMAL_TRAILING +PREHOOK: query: INSERT OVERWRITE TABLE DECIMAL_TRAILING SELECT * FROM DECIMAL_TRAILING_txt +PREHOOK: type: QUERY +PREHOOK: Input: 
default@decimal_trailing_txt +PREHOOK: Output: default@decimal_trailing +POSTHOOK: query: INSERT OVERWRITE TABLE DECIMAL_TRAILING SELECT * FROM DECIMAL_TRAILING_txt +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_trailing_txt +POSTHOOK: Output: default@decimal_trailing +POSTHOOK: Lineage: decimal_trailing.a SIMPLE [(decimal_trailing_txt)decimal_trailing_txt.FieldSchema(name:a, type:decimal(10,4), comment:null), ] +POSTHOOK: Lineage: decimal_trailing.b SIMPLE [(decimal_trailing_txt)decimal_trailing_txt.FieldSchema(name:b, type:decimal(15,8), comment:null), ] +POSTHOOK: Lineage: decimal_trailing.id SIMPLE [(decimal_trailing_txt)decimal_trailing_txt.FieldSchema(name:id, type:int, comment:null), ] +PREHOOK: query: SELECT * FROM DECIMAL_TRAILING ORDER BY id +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_trailing +#### A masked pattern was here #### +POSTHOOK: query: SELECT * FROM DECIMAL_TRAILING ORDER BY id +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_trailing +#### A masked pattern was here #### +0 0 0 +1 0 0 +2 NULL NULL +3 1.0000 1.00000000 +4 10.0000 10.00000000 +5 100.0000 100.00000000 +6 1000.0000 1000.00000000 +7 10000.0000 10000.00000000 +8 100000.0000 100000.00000000 +9 NULL 1000000.00000000 +10 NULL NULL +11 NULL NULL +12 NULL NULL +13 NULL NULL +14 NULL NULL +15 NULL NULL +16 NULL NULL +17 NULL NULL +18 1.0000 1.00000000 +19 10.000 10.0000000 +20 100.00 100.000000 +21 1000.0 1000.00000 +22 100000 10000.0000 +23 0.0000 0.00000000 +24 0.000 0.0000000 +25 0.00 0.000000 +26 0.0 0.00000 +27 0 0.00000 +28 12313.2000 134134.31252500 +29 99999.9990 134134.31242553 +PREHOOK: query: DROP TABLE DECIMAL_TRAILING_txt +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@decimal_trailing_txt +PREHOOK: Output: default@decimal_trailing_txt +POSTHOOK: query: DROP TABLE DECIMAL_TRAILING_txt +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@decimal_trailing_txt +POSTHOOK: Output: default@decimal_trailing_txt +PREHOOK: query: DROP TABLE DECIMAL_TRAILING +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@decimal_trailing +PREHOOK: Output: default@decimal_trailing +POSTHOOK: query: DROP TABLE DECIMAL_TRAILING +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@decimal_trailing +POSTHOOK: Output: default@decimal_trailing Index: ql/src/test/results/clientpositive/vector_decimal_udf.q.out =================================================================== --- ql/src/test/results/clientpositive/vector_decimal_udf.q.out (revision 0) +++ ql/src/test/results/clientpositive/vector_decimal_udf.q.out (working copy) @@ -0,0 +1,2657 @@ +PREHOOK: query: DROP TABLE IF EXISTS DECIMAL_UDF_txt +PREHOOK: type: DROPTABLE +POSTHOOK: query: DROP TABLE IF EXISTS DECIMAL_UDF_txt +POSTHOOK: type: DROPTABLE +PREHOOK: query: DROP TABLE IF EXISTS DECIMAL_UDF +PREHOOK: type: DROPTABLE +POSTHOOK: query: DROP TABLE IF EXISTS DECIMAL_UDF +POSTHOOK: type: DROPTABLE +PREHOOK: query: CREATE TABLE DECIMAL_UDF_txt (key decimal(20,10), value int) +ROW FORMAT DELIMITED + FIELDS TERMINATED BY ' ' +STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@DECIMAL_UDF_txt +POSTHOOK: query: CREATE TABLE DECIMAL_UDF_txt (key decimal(20,10), value int) +ROW FORMAT DELIMITED + FIELDS TERMINATED BY ' ' +STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@DECIMAL_UDF_txt +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv7.txt' INTO TABLE DECIMAL_UDF_txt +PREHOOK: type: LOAD +#### A masked pattern was 
here #### +PREHOOK: Output: default@decimal_udf_txt +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv7.txt' INTO TABLE DECIMAL_UDF_txt +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@decimal_udf_txt +PREHOOK: query: CREATE TABLE DECIMAL_UDF (key decimal(20,10), value int) +STORED AS ORC +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@DECIMAL_UDF +POSTHOOK: query: CREATE TABLE DECIMAL_UDF (key decimal(20,10), value int) +STORED AS ORC +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@DECIMAL_UDF +PREHOOK: query: INSERT OVERWRITE TABLE DECIMAL_UDF SELECT * FROM DECIMAL_UDF_txt +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_udf_txt +PREHOOK: Output: default@decimal_udf +POSTHOOK: query: INSERT OVERWRITE TABLE DECIMAL_UDF SELECT * FROM DECIMAL_UDF_txt +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_udf_txt +POSTHOOK: Output: default@decimal_udf +POSTHOOK: Lineage: decimal_udf.key SIMPLE [(decimal_udf_txt)decimal_udf_txt.FieldSchema(name:key, type:decimal(20,10), comment:null), ] +POSTHOOK: Lineage: decimal_udf.value SIMPLE [(decimal_udf_txt)decimal_udf_txt.FieldSchema(name:value, type:int, comment:null), ] +PREHOOK: query: -- addition +EXPLAIN SELECT key + key FROM DECIMAL_UDF +PREHOOK: type: QUERY +POSTHOOK: query: -- addition +EXPLAIN SELECT key + key FROM DECIMAL_UDF +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: decimal_udf + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: (key + key) (type: decimal(21,10)) + outputColumnNames: _col0 + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT key + key FROM DECIMAL_UDF +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +POSTHOOK: query: SELECT key + key FROM DECIMAL_UDF +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +-8800 +NULL +0.0000000000 +0 +200 +20 +2 +0.2 +0.02 +400 +40 +4 +0 +0.4 +0.04 +0.6 +0.66 +0.666 +-0.6 +-0.66 +-0.666 +2.0 +4 +6.28 +-2.24 +-2.24 +-2.244 +2.24 +2.244 +248.00 +250.4 +-2510.98 +6.28 +6.28 +6.280 +2.0000000000 +-2469135780.2469135780 +2469135780.2469135600 +PREHOOK: query: EXPLAIN SELECT key + value FROM DECIMAL_UDF +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN SELECT key + value FROM DECIMAL_UDF +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: decimal_udf + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: (key + value) (type: decimal(21,10)) + outputColumnNames: _col0 + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + File Output Operator + 
compressed: false + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT key + value FROM DECIMAL_UDF +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +POSTHOOK: query: SELECT key + value FROM DECIMAL_UDF +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +0 +NULL +0.0000000000 +0 +200 +20 +2 +0.1 +0.01 +400 +40 +4 +0 +0.2 +0.02 +0.3 +0.33 +0.333 +-0.3 +-0.33 +-0.333 +2.0 +4 +6.14 +-2.12 +-2.12 +-12.122 +2.12 +2.122 +248.00 +250.2 +-2510.49 +6.14 +6.14 +7.140 +2.0000000000 +-2469135780.1234567890 +2469135780.1234567800 +PREHOOK: query: EXPLAIN SELECT key + (value/2) FROM DECIMAL_UDF +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN SELECT key + (value/2) FROM DECIMAL_UDF +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: decimal_udf + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: (key + (value / 2)) (type: double) + outputColumnNames: _col0 + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT key + (value/2) FROM DECIMAL_UDF +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +POSTHOOK: query: SELECT key + (value/2) FROM DECIMAL_UDF +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +-2200.0 +NULL +0.0 +0.0 +150.0 +15.0 +1.5 +0.1 +0.01 +300.0 +30.0 +3.0 +0.0 +0.2 +0.02 +0.3 +0.33 +0.333 +-0.3 +-0.33 +-0.333 +1.5 +3.0 +4.640000000000001 +-1.62 +-1.62 +-6.622 +1.62 +1.622 +186.0 +187.7 +-1882.99 +4.640000000000001 +4.640000000000001 +5.140000000000001 +1.5 +-1.8518518351234567E9 +1.8518518351234567E9 +PREHOOK: query: EXPLAIN SELECT key + '1.0' FROM DECIMAL_UDF +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN SELECT key + '1.0' FROM DECIMAL_UDF +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: decimal_udf + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: (key + '1.0') (type: double) + outputColumnNames: _col0 + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + 
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT key + '1.0' FROM DECIMAL_UDF +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +POSTHOOK: query: SELECT key + '1.0' FROM DECIMAL_UDF +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +-4399.0 +NULL +1.0 +1.0 +101.0 +11.0 +2.0 +1.1 +1.01 +201.0 +21.0 +3.0 +1.0 +1.2 +1.02 +1.3 +1.33 +1.333 +0.7 +0.6699999999999999 +0.667 +2.0 +3.0 +4.140000000000001 +-0.1200000000000001 +-0.1200000000000001 +-0.12200000000000011 +2.12 +2.122 +125.0 +126.2 +-1254.49 +4.140000000000001 +4.140000000000001 +4.140000000000001 +2.0 +-1.2345678891234567E9 +1.2345678911234567E9 +PREHOOK: query: -- subtraction +EXPLAIN SELECT key - key FROM DECIMAL_UDF +PREHOOK: type: QUERY +POSTHOOK: query: -- subtraction +EXPLAIN SELECT key - key FROM DECIMAL_UDF +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: decimal_udf + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: (key - key) (type: decimal(21,10)) + outputColumnNames: _col0 + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT key - key FROM DECIMAL_UDF +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +POSTHOOK: query: SELECT key - key FROM DECIMAL_UDF +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +0 +NULL +0.0000000000 +0 +0 +0 +0 +0.0 +0.00 +0 +0 +0 +0 +0.0 +0.00 +0.0 +0.00 +0.000 +0.0 +0.00 +0.000 +0.0 +0 +0.00 +0.00 +0.00 +0.000 +0.00 +0.000 +0.00 +0.0 +0.00 +0.00 +0.00 +0.000 +0.0000000000 +0.0000000000 +0.0000000000 +PREHOOK: query: EXPLAIN SELECT key - value FROM DECIMAL_UDF +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN SELECT key - value FROM DECIMAL_UDF +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: decimal_udf + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: (key - value) (type: decimal(21,10)) + outputColumnNames: _col0 + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT key - value FROM DECIMAL_UDF +PREHOOK: type: QUERY
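Addition and subtraction share one typing rule in these plans: for decimal(p1,s1) ± decimal(p2,s2), the result scale is max(s1,s2) and the precision is max(p1-s1, p2-s2) + scale + 1, so key (decimal(20,10)) plus or minus either itself or the int column (promoted to decimal(10,0)) comes out decimal(21,10), while a string operand such as '1.0' coerces both sides to double. A minimal sketch of the rule as observed here (plusType is an illustrative name, not Hive's API):

public class AddSubTypeSketch {
  // decimal(p1,s1) +/- decimal(p2,s2): scale = max(s1, s2),
  // precision = max(p1 - s1, p2 - s2) + scale + 1, capped at 38.
  static String plusType(int p1, int s1, int p2, int s2) {
    int scale = Math.max(s1, s2);
    int precision = Math.min(38, Math.max(p1 - s1, p2 - s2) + scale + 1);
    return "decimal(" + precision + "," + scale + ")";
  }

  public static void main(String[] args) {
    System.out.println(plusType(20, 10, 20, 10)); // decimal(21,10): key +/- key
    System.out.println(plusType(20, 10, 10, 0));  // decimal(21,10): key +/- int value
  }
}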
+PREHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +POSTHOOK: query: SELECT key - value FROM DECIMAL_UDF +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +-8800 +NULL +0.0000000000 +0 +0 +0 +0 +0.1 +0.01 +0 +0 +0 +0 +0.2 +0.02 +0.3 +0.33 +0.333 +-0.3 +-0.33 +-0.333 +0.0 +0 +0.14 +-0.12 +-0.12 +9.878 +0.12 +0.122 +0.00 +0.2 +-0.49 +0.14 +0.14 +-0.860 +0.0000000000 +-0.1234567890 +0.1234567800 +PREHOOK: query: EXPLAIN SELECT key - (value/2) FROM DECIMAL_UDF +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN SELECT key - (value/2) FROM DECIMAL_UDF +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: decimal_udf + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: (key - (value / 2)) (type: double) + outputColumnNames: _col0 + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT key - (value/2) FROM DECIMAL_UDF +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +POSTHOOK: query: SELECT key - (value/2) FROM DECIMAL_UDF +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +-6600.0 +NULL +0.0 +0.0 +50.0 +5.0 +0.5 +0.1 +0.01 +100.0 +10.0 +1.0 +0.0 +0.2 +0.02 +0.3 +0.33 +0.333 +-0.3 +-0.33 +-0.333 +0.5 +1.0 +1.6400000000000001 +-0.6200000000000001 +-0.6200000000000001 +4.378 +0.6200000000000001 +0.6220000000000001 +62.0 +62.7 +-627.99 +1.6400000000000001 +1.6400000000000001 +1.1400000000000001 +0.5 +-6.172839451234567E8 +6.172839451234567E8 +PREHOOK: query: EXPLAIN SELECT key - '1.0' FROM DECIMAL_UDF +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN SELECT key - '1.0' FROM DECIMAL_UDF +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: decimal_udf + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: (key - '1.0') (type: double) + outputColumnNames: _col0 + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT key - '1.0' FROM DECIMAL_UDF +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +POSTHOOK: query: SELECT key - '1.0' FROM DECIMAL_UDF +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +-4401.0 +NULL +-1.0 +-1.0 
+99.0 +9.0 +0.0 +-0.9 +-0.99 +199.0 +19.0 +1.0 +-1.0 +-0.8 +-0.98 +-0.7 +-0.6699999999999999 +-0.667 +-1.3 +-1.33 +-1.333 +0.0 +1.0 +2.14 +-2.12 +-2.12 +-2.122 +0.1200000000000001 +0.12200000000000011 +123.0 +124.2 +-1256.49 +2.14 +2.14 +2.14 +0.0 +-1.2345678911234567E9 +1.2345678891234567E9 +PREHOOK: query: -- multiplication +EXPLAIN SELECT key * key FROM DECIMAL_UDF +PREHOOK: type: QUERY +POSTHOOK: query: -- multiplication +EXPLAIN SELECT key * key FROM DECIMAL_UDF +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: decimal_udf + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: (key * key) (type: decimal(38,20)) + outputColumnNames: _col0 + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT key * key FROM DECIMAL_UDF +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +POSTHOOK: query: SELECT key * key FROM DECIMAL_UDF +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +19360000 +NULL +0.00000000000000000000 +0 +10000 +100 +1 +0.01 +0.0001 +40000 +400 +4 +0 +0.04 +0.0004 +0.09 +0.1089 +0.110889 +0.09 +0.1089 +0.110889 +1.00 +4 +9.8596 +1.2544 +1.2544 +1.258884 +1.2544 +1.258884 +15376.0000 +15675.04 +1576255.1401 +9.8596 +9.8596 +9.859600 +1.00000000000000000000 +NULL +NULL +PREHOOK: query: EXPLAIN SELECT key, value FROM DECIMAL_UDF where key * value > 0 +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN SELECT key, value FROM DECIMAL_UDF where key * value > 0 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: decimal_udf + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: ((key * value) > 0) (type: boolean) + Statistics: Num rows: 12 Data size: 1356 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: key (type: decimal(20,10)), value (type: int) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 12 Data size: 1356 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 12 Data size: 1356 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT key, value FROM DECIMAL_UDF where key * value > 0 +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +POSTHOOK: query: SELECT key, value FROM DECIMAL_UDF where key * value > 0 +POSTHOOK: type: QUERY +POSTHOOK: Input: 
default@decimal_udf +#### A masked pattern was here #### +100 100 +10 10 +1 1 +200 200 +20 20 +2 2 +1.0 1 +2 2 +3.14 3 +-1.12 -1 +-1.12 -1 +-1.122 -11 +1.12 1 +1.122 1 +124.00 124 +125.2 125 +-1255.49 -1255 +3.14 3 +3.14 3 +3.140 4 +1.0000000000 1 +-1234567890.1234567890 -1234567890 +1234567890.1234567800 1234567890 +PREHOOK: query: EXPLAIN SELECT key * value FROM DECIMAL_UDF +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN SELECT key * value FROM DECIMAL_UDF +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: decimal_udf + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: (key * value) (type: decimal(31,10)) + outputColumnNames: _col0 + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT key * value FROM DECIMAL_UDF +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +POSTHOOK: query: SELECT key * value FROM DECIMAL_UDF +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +-19360000 +NULL +0.0000000000 +0 +10000 +100 +1 +0.0 +0.00 +40000 +400 +4 +0 +0.0 +0.00 +0.0 +0.00 +0.000 +0.0 +0.00 +0.000 +1.0 +4 +9.42 +1.12 +1.12 +12.342 +1.12 +1.122 +15376.00 +15650.0 +1575639.95 +9.42 +9.42 +12.560 +1.0000000000 +1524157875171467887.5019052100 +1524157875171467876.3907942000 +PREHOOK: query: EXPLAIN SELECT key * (value/2) FROM DECIMAL_UDF +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN SELECT key * (value/2) FROM DECIMAL_UDF +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: decimal_udf + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: (key * (value / 2)) (type: double) + outputColumnNames: _col0 + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT key * (value/2) FROM DECIMAL_UDF +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +POSTHOOK: query: SELECT key * (value/2) FROM DECIMAL_UDF +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +-9680000.0 +NULL +0.0 +0.0 +5000.0 +50.0 +0.5 +0.0 +0.0 +20000.0 +200.0 +2.0 +0.0 +0.0 +0.0 +0.0 +0.0 +0.0 +-0.0 +-0.0 +-0.0 +0.5 +2.0 +4.71 +0.56 +0.56 +6.171 +0.56 +0.561 +7688.0 +7825.0 +787819.975 +4.71 
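Multiplication widens both parts at once: the result scale is s1 + s2 and the precision p1 + p2 + 1, capped at Hive's maximum of 38, so key * key plans as decimal(38,20); with 20 digits reserved for the fraction only 18 integer digits remain, which is why squaring ±1234567890.123456789 returns NULL above rather than a truncated value. A minimal sketch of the observed rule (timesType is an illustrative name):

import java.math.BigDecimal;

public class MultiplyTypeSketch {
  // decimal(p1,s1) * decimal(p2,s2): scale = s1 + s2,
  // precision = p1 + p2 + 1, capped at 38.
  static String timesType(int p1, int s1, int p2, int s2) {
    return "decimal(" + Math.min(38, p1 + p2 + 1) + "," + (s1 + s2) + ")";
  }

  public static void main(String[] args) {
    System.out.println(timesType(20, 10, 20, 10)); // decimal(38,20), capped from (41,20)
    System.out.println(timesType(20, 10, 10, 0));  // decimal(31,10): key * int value
    // Squaring 1234567890.1234567890 needs 19 integer digits plus 20 of
    // fraction, one more than the capped type can hold, hence NULL in Hive:
    BigDecimal k = new BigDecimal("1234567890.1234567890");
    System.out.println(k.multiply(k).precision()); // 39 > 38
  }
}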
+4.71 +6.28 +0.5 +7.6207893758573389E17 +7.6207893758573389E17 +PREHOOK: query: EXPLAIN SELECT key * '2.0' FROM DECIMAL_UDF +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN SELECT key * '2.0' FROM DECIMAL_UDF +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: decimal_udf + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: (key * '2.0') (type: double) + outputColumnNames: _col0 + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT key * '2.0' FROM DECIMAL_UDF +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +POSTHOOK: query: SELECT key * '2.0' FROM DECIMAL_UDF +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +-8800.0 +NULL +0.0 +0.0 +200.0 +20.0 +2.0 +0.2 +0.02 +400.0 +40.0 +4.0 +0.0 +0.4 +0.04 +0.6 +0.66 +0.666 +-0.6 +-0.66 +-0.666 +2.0 +4.0 +6.28 +-2.24 +-2.24 +-2.244 +2.24 +2.244 +248.0 +250.4 +-2510.98 +6.28 +6.28 +6.28 +2.0 +-2.4691357802469134E9 +2.4691357802469134E9 +PREHOOK: query: -- division +EXPLAIN SELECT key / 0 FROM DECIMAL_UDF limit 1 +PREHOOK: type: QUERY +POSTHOOK: query: -- division +EXPLAIN SELECT key / 0 FROM DECIMAL_UDF limit 1 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: decimal_udf + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: (key / 0) (type: decimal(22,12)) + outputColumnNames: _col0 + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + Limit + Number of rows: 1 + Statistics: Num rows: 1 Data size: 113 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 113 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: 1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT key / 0 FROM DECIMAL_UDF limit 1 +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +POSTHOOK: query: SELECT key / 0 FROM DECIMAL_UDF limit 1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +NULL +PREHOOK: query: EXPLAIN SELECT key / NULL FROM DECIMAL_UDF limit 1 +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN SELECT key / NULL FROM DECIMAL_UDF limit 1 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: decimal_udf + Statistics: Num rows: 38 Data 
size: 4296 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: (key / null) (type: double) + outputColumnNames: _col0 + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + Limit + Number of rows: 1 + Statistics: Num rows: 1 Data size: 113 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 113 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: 1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT key / NULL FROM DECIMAL_UDF limit 1 +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +POSTHOOK: query: SELECT key / NULL FROM DECIMAL_UDF limit 1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +NULL +PREHOOK: query: EXPLAIN SELECT key / key FROM DECIMAL_UDF WHERE key is not null and key <> 0 +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN SELECT key / key FROM DECIMAL_UDF WHERE key is not null and key <> 0 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: decimal_udf + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (key <> 0) (type: boolean) + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: (key / key) (type: decimal(38,24)) + outputColumnNames: _col0 + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT key / key FROM DECIMAL_UDF WHERE key is not null and key <> 0 +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +POSTHOOK: query: SELECT key / key FROM DECIMAL_UDF WHERE key is not null and key <> 0 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +PREHOOK: query: EXPLAIN SELECT key / value FROM DECIMAL_UDF WHERE value is not null and value <> 0 +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN SELECT key / value FROM DECIMAL_UDF WHERE value is not null and value <> 0 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: decimal_udf + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (value <> 0) (type: boolean) + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: (key / value) (type: decimal(31,21)) + outputColumnNames: _col0 + 
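The NULL rows above are deliberate: a decimal division with a zero or NULL divisor yields SQL NULL instead of raising an error, and the quotient types widen sharply (key / key plans as decimal(38,24), key / value as decimal(31,21)). A minimal null-safe sketch of the NULL-on-zero behavior with BigDecimal (divideOrNull is an illustrative name; Hive's own code path differs):

import java.math.BigDecimal;
import java.math.MathContext;

public class DivideSketch {
  // A zero or NULL divisor yields SQL NULL, never an ArithmeticException.
  static BigDecimal divideOrNull(BigDecimal a, BigDecimal b) {
    if (a == null || b == null || b.signum() == 0) {
      return null; // stands in for SQL NULL
    }
    return a.divide(b, MathContext.DECIMAL128);
  }

  public static void main(String[] args) {
    System.out.println(divideOrNull(new BigDecimal("3.14"), BigDecimal.ZERO)); // null
    System.out.println(divideOrNull(new BigDecimal("3.14"), new BigDecimal("3"))); // 1.0466...
  }
}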
Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT key / value FROM DECIMAL_UDF WHERE value is not null and value <> 0 +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +POSTHOOK: query: SELECT key / value FROM DECIMAL_UDF WHERE value is not null and value <> 0 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +-1 +1 +1 +1 +1 +1 +1 +1 +1 +1.046666666666666666667 +1.12 +1.12 +0.102 +1.12 +1.122 +1 +1.0016 +1.000390438247011952191 +1.046666666666666666667 +1.046666666666666666667 +0.785 +1 +1.0000000001 +1.000000000099999992710 +PREHOOK: query: EXPLAIN SELECT key / (value/2) FROM DECIMAL_UDF WHERE value is not null and value <> 0 +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN SELECT key / (value/2) FROM DECIMAL_UDF WHERE value is not null and value <> 0 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: decimal_udf + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (value <> 0) (type: boolean) + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: (key / (value / 2)) (type: double) + outputColumnNames: _col0 + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT key / (value/2) FROM DECIMAL_UDF WHERE value is not null and value <> 0 +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +POSTHOOK: query: SELECT key / (value/2) FROM DECIMAL_UDF WHERE value is not null and value <> 0 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +-2.0 +2.0 +2.0 +2.0 +2.0 +2.0 +2.0 +2.0 +2.0 +2.0933333333333333 +2.24 +2.24 +0.20400000000000001 +2.24 +2.244 +2.0 +2.0032 +2.000780876494024 +2.0933333333333333 +2.0933333333333333 +1.57 +2.0 +2.0000000002 +2.0000000002 +PREHOOK: query: EXPLAIN SELECT 1 + (key / '2.0') FROM DECIMAL_UDF +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN SELECT 1 + (key / '2.0') FROM DECIMAL_UDF +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: decimal_udf + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: (1 + (key / '2.0')) (type: double) + outputColumnNames: _col0 + 
Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT 1 + (key / '2.0') FROM DECIMAL_UDF +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +POSTHOOK: query: SELECT 1 + (key / '2.0') FROM DECIMAL_UDF +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +-2199.0 +NULL +1.0 +1.0 +51.0 +6.0 +1.5 +1.05 +1.005 +101.0 +11.0 +2.0 +1.0 +1.1 +1.01 +1.15 +1.165 +1.1665 +0.85 +0.835 +0.8335 +1.5 +2.0 +2.5700000000000003 +0.43999999999999995 +0.43999999999999995 +0.43899999999999995 +1.56 +1.561 +63.0 +63.6 +-626.745 +2.5700000000000003 +2.5700000000000003 +2.5700000000000003 +1.5 +-6.172839440617284E8 +6.172839460617284E8 +PREHOOK: query: -- abs +EXPLAIN SELECT abs(key) FROM DECIMAL_UDF +PREHOOK: type: QUERY +POSTHOOK: query: -- abs +EXPLAIN SELECT abs(key) FROM DECIMAL_UDF +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: decimal_udf + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: abs(key) (type: decimal(38,18)) + outputColumnNames: _col0 + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT abs(key) FROM DECIMAL_UDF +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +POSTHOOK: query: SELECT abs(key) FROM DECIMAL_UDF +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +4400 +NULL +0.0000000000 +0 +100 +10 +1 +0.1 +0.01 +200 +20 +2 +0 +0.2 +0.02 +0.3 +0.33 +0.333 +0.3 +0.33 +0.333 +1.0 +2 +3.14 +1.12 +1.12 +1.122 +1.12 +1.122 +124.00 +125.2 +1255.49 +3.14 +3.14 +3.140 +1.0000000000 +1234567890.1234567890 +1234567890.1234567800 +PREHOOK: query: -- avg +EXPLAIN SELECT value, sum(key) / count(key), avg(key), sum(key) FROM DECIMAL_UDF GROUP BY value ORDER BY value +PREHOOK: type: QUERY +POSTHOOK: query: -- avg +EXPLAIN SELECT value, sum(key) / count(key), avg(key), sum(key) FROM DECIMAL_UDF GROUP BY value ORDER BY value +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-2 depends on stages: Stage-1 + Stage-0 depends on stages: Stage-2 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: decimal_udf + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: value (type: int), key (type: decimal(20,10)) + outputColumnNames: value, key + Statistics: Num rows: 38 Data 
size: 4296 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: sum(key), count(key), avg(key) + keys: value (type: int) + mode: hash + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: int) + sort order: + + Map-reduce partition columns: _col0 (type: int) + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + value expressions: _col1 (type: decimal(30,10)), _col2 (type: bigint), _col3 (type: struct) + Execution mode: vectorized + Reduce Operator Tree: + Group By Operator + aggregations: sum(VALUE._col0), count(VALUE._col1), avg(VALUE._col2) + keys: KEY._col0 (type: int) + mode: mergepartial + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 19 Data size: 2148 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: int), (_col1 / _col2) (type: decimal(38,23)), _col3 (type: decimal(24,14)), _col1 (type: decimal(30,10)) + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 19 Data size: 2148 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe + + Stage: Stage-2 + Map Reduce + Map Operator Tree: + TableScan + Reduce Output Operator + key expressions: _col0 (type: int) + sort order: + + Statistics: Num rows: 19 Data size: 2148 Basic stats: COMPLETE Column stats: NONE + value expressions: _col1 (type: decimal(38,23)), _col2 (type: decimal(24,14)), _col3 (type: decimal(30,10)) + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: int), VALUE._col0 (type: decimal(38,23)), VALUE._col1 (type: decimal(24,14)), VALUE._col2 (type: decimal(30,10)) + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 19 Data size: 2148 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 19 Data size: 2148 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT value, sum(key) / count(key), avg(key), sum(key) FROM DECIMAL_UDF GROUP BY value ORDER BY value +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +POSTHOOK: query: SELECT value, sum(key) / count(key), avg(key), sum(key) FROM DECIMAL_UDF GROUP BY value ORDER BY value +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +-1234567890 -1234567890.123456789 -1234567890.123456789 -1234567890.1234567890 +-1255 -1255.49 -1255.49 -1255.49 +-11 -1.122 -1.122 -1.122 +-1 -1.12 -1.12 -2.24 +0 0.02538461538461538461538 0.02538461538462 0.3300000000 +1 1.0484 1.0484 5.2420000000 +2 2 2 4 +3 3.14 3.14 9.42 +4 3.14 3.14 3.140 +10 10 10 10 +20 20 20 20 +100 100 100 100 +124 124 124 124.00 +125 125.2 125.2 125.2 +200 200 200 200 +4400 -4400 -4400 -4400 +1234567890 1234567890.12345678 1234567890.12345678 1234567890.1234567800 +PREHOOK: query: -- negative +EXPLAIN SELECT -key FROM DECIMAL_UDF +PREHOOK: type: 
QUERY +POSTHOOK: query: -- negative +EXPLAIN SELECT -key FROM DECIMAL_UDF +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: decimal_udf + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: (- key) (type: decimal(20,10)) + outputColumnNames: _col0 + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT -key FROM DECIMAL_UDF +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +POSTHOOK: query: SELECT -key FROM DECIMAL_UDF +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +4400 +NULL +0.0000000000 +0 +-100 +-10 +-1 +-0.1 +-0.01 +-200 +-20 +-2 +0 +-0.2 +-0.02 +-0.3 +-0.33 +-0.333 +0.3 +0.33 +0.333 +-1.0 +-2 +-3.14 +1.12 +1.12 +1.122 +-1.12 +-1.122 +-124.00 +-125.2 +1255.49 +-3.14 +-3.14 +-3.140 +-1.0000000000 +1234567890.1234567890 +-1234567890.1234567800 +PREHOOK: query: -- positive +EXPLAIN SELECT +key FROM DECIMAL_UDF +PREHOOK: type: QUERY +POSTHOOK: query: -- positive +EXPLAIN SELECT +key FROM DECIMAL_UDF +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + TableScan + alias: decimal_udf + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: key (type: decimal(20,10)) + outputColumnNames: _col0 + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + ListSink + +PREHOOK: query: SELECT +key FROM DECIMAL_UDF +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +POSTHOOK: query: SELECT +key FROM DECIMAL_UDF +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +-4400 +NULL +0.0000000000 +0 +100 +10 +1 +0.1 +0.01 +200 +20 +2 +0 +0.2 +0.02 +0.3 +0.33 +0.333 +-0.3 +-0.33 +-0.333 +1.0 +2 +3.14 +-1.12 +-1.12 +-1.122 +1.12 +1.122 +124.00 +125.2 +-1255.49 +3.14 +3.14 +3.140 +1.0000000000 +-1234567890.1234567890 +1234567890.1234567800 +PREHOOK: query: -- ceiling +EXPLAIN SELECT CEIL(key) FROM DECIMAL_UDF +PREHOOK: type: QUERY +POSTHOOK: query: -- ceiling +EXPLAIN SELECT CEIL(key) FROM DECIMAL_UDF +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: decimal_udf + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: ceil(key) (type: decimal(11,0)) + outputColumnNames: _col0 + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output
format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT CEIL(key) FROM DECIMAL_UDF +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +POSTHOOK: query: SELECT CEIL(key) FROM DECIMAL_UDF +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +-4400 +NULL +0 +0 +100 +10 +1 +1 +1 +200 +20 +2 +0 +1 +1 +1 +1 +1 +0 +0 +0 +1 +2 +4 +-1 +-1 +-1 +2 +2 +124 +126 +-1255 +4 +4 +4 +1 +-1234567890 +1234567891 +PREHOOK: query: -- floor +EXPLAIN SELECT FLOOR(key) FROM DECIMAL_UDF +PREHOOK: type: QUERY +POSTHOOK: query: -- floor +EXPLAIN SELECT FLOOR(key) FROM DECIMAL_UDF +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: decimal_udf + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: floor(key) (type: decimal(11,0)) + outputColumnNames: _col0 + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT FLOOR(key) FROM DECIMAL_UDF +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +POSTHOOK: query: SELECT FLOOR(key) FROM DECIMAL_UDF +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +-4400 +NULL +0 +0 +100 +10 +1 +0 +0 +200 +20 +2 +0 +0 +0 +0 +0 +0 +-1 +-1 +-1 +1 +2 +3 +-2 +-2 +-2 +1 +1 +124 +125 +-1256 +3 +3 +3 +1 +-1234567891 +1234567890 +PREHOOK: query: -- round +EXPLAIN SELECT ROUND(key, 2) FROM DECIMAL_UDF +PREHOOK: type: QUERY +POSTHOOK: query: -- round +EXPLAIN SELECT ROUND(key, 2) FROM DECIMAL_UDF +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: decimal_udf + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: round(key, 2) (type: decimal(13,2)) + outputColumnNames: _col0 + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT ROUND(key, 2) FROM DECIMAL_UDF +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +POSTHOOK: query: SELECT ROUND(key, 2) FROM DECIMAL_UDF +POSTHOOK: type: QUERY +POSTHOOK: Input: 
default@decimal_udf +#### A masked pattern was here #### +-4400.00 +NULL +0.00 +0.00 +100.00 +10.00 +1.00 +0.10 +0.01 +200.00 +20.00 +2.00 +0.00 +0.20 +0.02 +0.30 +0.33 +0.33 +-0.30 +-0.33 +-0.33 +1.00 +2.00 +3.14 +-1.12 +-1.12 +-1.12 +1.12 +1.12 +124.00 +125.20 +-1255.49 +3.14 +3.14 +3.14 +1.00 +-1234567890.12 +1234567890.12 +PREHOOK: query: -- power +EXPLAIN SELECT POWER(key, 2) FROM DECIMAL_UDF +PREHOOK: type: QUERY +POSTHOOK: query: -- power +EXPLAIN SELECT POWER(key, 2) FROM DECIMAL_UDF +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: decimal_udf + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: power(key, 2) (type: double) + outputColumnNames: _col0 + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT POWER(key, 2) FROM DECIMAL_UDF +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +POSTHOOK: query: SELECT POWER(key, 2) FROM DECIMAL_UDF +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +1.936E7 +NULL +0.0 +0.0 +10000.0 +100.0 +1.0 +0.010000000000000002 +1.0E-4 +40000.0 +400.0 +4.0 +0.0 +0.04000000000000001 +4.0E-4 +0.09 +0.10890000000000001 +0.11088900000000002 +0.09 +0.10890000000000001 +0.11088900000000002 +1.0 +4.0 +9.8596 +1.2544000000000002 +1.2544000000000002 +1.2588840000000003 +1.2544000000000002 +1.2588840000000003 +15376.0 +15675.04 +1576255.1401 +9.8596 +9.8596 +9.8596 +1.0 +1.52415787532388352E18 +1.52415787532388352E18 +PREHOOK: query: -- modulo +EXPLAIN SELECT (key + 1) % (key / 2) FROM DECIMAL_UDF +PREHOOK: type: QUERY +POSTHOOK: query: -- modulo +EXPLAIN SELECT (key + 1) % (key / 2) FROM DECIMAL_UDF +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: decimal_udf + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: ((key + 1) % (key / 2)) (type: decimal(22,12)) + outputColumnNames: _col0 + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT (key + 1) % (key / 2) FROM DECIMAL_UDF +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +POSTHOOK: query: SELECT (key + 1) % (key / 2) FROM DECIMAL_UDF +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_udf +#### A masked pattern was here 
#### +-2199 +NULL +NULL +NULL +1 +1 +0.0 +0.00 +0.000 +1 +1 +0 +NULL +0.0 +0.00 +0.10 +0.010 +0.0010 +0.10 +0.010 +0.0010 +0.0 +0 +1.00 +-0.12 +-0.12 +-0.122 +0.44 +0.439 +1.00 +1.0 +-626.745 +1.00 +1.00 +1.000 +0.0000000000 +-617283944.0617283945 +1.0000000000 +PREHOOK: query: -- stddev, var +EXPLAIN SELECT value, stddev(key), variance(key) FROM DECIMAL_UDF GROUP BY value +PREHOOK: type: QUERY +POSTHOOK: query: -- stddev, var +EXPLAIN SELECT value, stddev(key), variance(key) FROM DECIMAL_UDF GROUP BY value +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: decimal_udf + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: value (type: int), key (type: decimal(20,10)) + outputColumnNames: value, key + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: stddev(key), variance(key) + keys: value (type: int) + mode: hash + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: int) + sort order: + + Map-reduce partition columns: _col0 (type: int) + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + value expressions: _col1 (type: struct<count:bigint,sum:double,variance:double>), _col2 (type: struct<count:bigint,sum:double,variance:double>) + Execution mode: vectorized + Reduce Operator Tree: + Group By Operator + aggregations: stddev(VALUE._col0), variance(VALUE._col1) + keys: KEY._col0 (type: int) + mode: mergepartial + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 19 Data size: 2148 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: int), _col1 (type: double), _col2 (type: double) + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 19 Data size: 2148 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 19 Data size: 2148 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT value, stddev(key), variance(key) FROM DECIMAL_UDF GROUP BY value +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +POSTHOOK: query: SELECT value, stddev(key), variance(key) FROM DECIMAL_UDF GROUP BY value +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +-1234567890 0.0 0.0 +-1255 0.0 0.0 +-11 0.0 0.0 +-1 0.0 0.0 +0 0.22561046704494161 0.050900082840236685 +1 0.05928102563215321 0.0035142400000000066 +2 0.0 0.0 +3 0.0 0.0 +4 0.0 0.0 +10 0.0 0.0 +20 0.0 0.0 +100 0.0 0.0 +124 0.0 0.0 +125 0.0 0.0 +200 0.0 0.0 +4400 0.0 0.0 +1234567890 0.0 0.0 +PREHOOK: query: -- stddev_samp, var_samp +EXPLAIN SELECT value, stddev_samp(key), var_samp(key) FROM DECIMAL_UDF GROUP BY value +PREHOOK: type: QUERY +POSTHOOK: query: -- stddev_samp, var_samp +EXPLAIN SELECT value, stddev_samp(key), var_samp(key) FROM DECIMAL_UDF GROUP BY value +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator 
Tree: + TableScan + alias: decimal_udf + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: value (type: int), key (type: decimal(20,10)) + outputColumnNames: value, key + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: stddev_samp(key), var_samp(key) + keys: value (type: int) + mode: hash + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: int) + sort order: + + Map-reduce partition columns: _col0 (type: int) + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + value expressions: _col1 (type: struct<count:bigint,sum:double,variance:double>), _col2 (type: struct<count:bigint,sum:double,variance:double>) + Execution mode: vectorized + Reduce Operator Tree: + Group By Operator + aggregations: stddev_samp(VALUE._col0), var_samp(VALUE._col1) + keys: KEY._col0 (type: int) + mode: mergepartial + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 19 Data size: 2148 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: int), _col1 (type: double), _col2 (type: double) + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 19 Data size: 2148 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 19 Data size: 2148 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT value, stddev_samp(key), var_samp(key) FROM DECIMAL_UDF GROUP BY value +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +POSTHOOK: query: SELECT value, stddev_samp(key), var_samp(key) FROM DECIMAL_UDF GROUP BY value +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +-1234567890 0.0 0.0 +-1255 0.0 0.0 +-11 0.0 0.0 +-1 0.0 0.0 +0 0.2348228191855647 0.055141756410256405 +1 0.06627820154470102 0.004392800000000008 +2 0.0 0.0 +3 0.0 0.0 +4 0.0 0.0 +10 0.0 0.0 +20 0.0 0.0 +100 0.0 0.0 +124 0.0 0.0 +125 0.0 0.0 +200 0.0 0.0 +4400 0.0 0.0 +1234567890 0.0 0.0 +PREHOOK: query: -- histogram +EXPLAIN SELECT histogram_numeric(key, 3) FROM DECIMAL_UDF +PREHOOK: type: QUERY +POSTHOOK: query: -- histogram +EXPLAIN SELECT histogram_numeric(key, 3) FROM DECIMAL_UDF +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: decimal_udf + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: key (type: decimal(20,10)) + outputColumnNames: key + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: histogram_numeric(key, 3) + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE + value expressions: _col0 (type: array<double>) + Reduce Operator Tree: + Group By Operator + aggregations: histogram_numeric(VALUE._col0) + mode: 
mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE + Select Operator + expressions: _col0 (type: array<struct<x:double,y:double>>) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT histogram_numeric(key, 3) FROM DECIMAL_UDF +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +POSTHOOK: query: SELECT histogram_numeric(key, 3) FROM DECIMAL_UDF +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +[{"x":-1.2345678901234567E9,"y":1.0},{"x":-144.50057142857142,"y":35.0},{"x":1.2345678901234567E9,"y":1.0}] +PREHOOK: query: -- min +EXPLAIN SELECT MIN(key) FROM DECIMAL_UDF +PREHOOK: type: QUERY +POSTHOOK: query: -- min +EXPLAIN SELECT MIN(key) FROM DECIMAL_UDF +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: decimal_udf + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: key (type: decimal(20,10)) + outputColumnNames: key + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: min(key) + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: decimal(20,10)) + Execution mode: vectorized + Reduce Operator Tree: + Group By Operator + aggregations: min(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: decimal(20,10)) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT MIN(key) FROM DECIMAL_UDF +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +POSTHOOK: query: SELECT MIN(key) FROM DECIMAL_UDF +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +-1234567890.1234567890 +PREHOOK: query: -- max +EXPLAIN SELECT MAX(key) FROM DECIMAL_UDF +PREHOOK: type: QUERY +POSTHOOK: query: -- max +EXPLAIN SELECT MAX(key) FROM DECIMAL_UDF +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: 
decimal_udf + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: key (type: decimal(20,10)) + outputColumnNames: key + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: max(key) + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: decimal(20,10)) + Execution mode: vectorized + Reduce Operator Tree: + Group By Operator + aggregations: max(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: decimal(20,10)) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT MAX(key) FROM DECIMAL_UDF +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +POSTHOOK: query: SELECT MAX(key) FROM DECIMAL_UDF +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +1234567890.1234567800 +PREHOOK: query: -- count +EXPLAIN SELECT COUNT(key) FROM DECIMAL_UDF +PREHOOK: type: QUERY +POSTHOOK: query: -- count +EXPLAIN SELECT COUNT(key) FROM DECIMAL_UDF +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: decimal_udf + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: key (type: decimal(20,10)) + outputColumnNames: key + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count(key) + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Execution mode: vectorized + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT COUNT(key) FROM DECIMAL_UDF +PREHOOK: type: 
QUERY +PREHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +POSTHOOK: query: SELECT COUNT(key) FROM DECIMAL_UDF +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +37 +PREHOOK: query: DROP TABLE IF EXISTS DECIMAL_UDF_txt +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@decimal_udf_txt +PREHOOK: Output: default@decimal_udf_txt +POSTHOOK: query: DROP TABLE IF EXISTS DECIMAL_UDF_txt +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@decimal_udf_txt +POSTHOOK: Output: default@decimal_udf_txt +PREHOOK: query: DROP TABLE IF EXISTS DECIMAL_UDF +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@decimal_udf +PREHOOK: Output: default@decimal_udf +POSTHOOK: query: DROP TABLE IF EXISTS DECIMAL_UDF +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@decimal_udf +POSTHOOK: Output: default@decimal_udf Index: ql/src/test/results/clientpositive/vector_decimal_udf2.q.out =================================================================== --- ql/src/test/results/clientpositive/vector_decimal_udf2.q.out (revision 0) +++ ql/src/test/results/clientpositive/vector_decimal_udf2.q.out (working copy) @@ -0,0 +1,181 @@ +PREHOOK: query: DROP TABLE IF EXISTS DECIMAL_UDF2_txt +PREHOOK: type: DROPTABLE +POSTHOOK: query: DROP TABLE IF EXISTS DECIMAL_UDF2_txt +POSTHOOK: type: DROPTABLE +PREHOOK: query: DROP TABLE IF EXISTS DECIMAL_UDF2 +PREHOOK: type: DROPTABLE +POSTHOOK: query: DROP TABLE IF EXISTS DECIMAL_UDF2 +POSTHOOK: type: DROPTABLE +PREHOOK: query: CREATE TABLE DECIMAL_UDF2_txt (key decimal(20,10), value int) +ROW FORMAT DELIMITED + FIELDS TERMINATED BY ' ' +STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@DECIMAL_UDF2_txt +POSTHOOK: query: CREATE TABLE DECIMAL_UDF2_txt (key decimal(20,10), value int) +ROW FORMAT DELIMITED + FIELDS TERMINATED BY ' ' +STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@DECIMAL_UDF2_txt +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv7.txt' INTO TABLE DECIMAL_UDF2_txt +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@decimal_udf2_txt +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv7.txt' INTO TABLE DECIMAL_UDF2_txt +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@decimal_udf2_txt +PREHOOK: query: CREATE TABLE DECIMAL_UDF2 (key decimal(20,10), value int) +STORED AS ORC +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@DECIMAL_UDF2 +POSTHOOK: query: CREATE TABLE DECIMAL_UDF2 (key decimal(20,10), value int) +STORED AS ORC +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@DECIMAL_UDF2 +PREHOOK: query: INSERT OVERWRITE TABLE DECIMAL_UDF2 SELECT * FROM DECIMAL_UDF2_txt +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_udf2_txt +PREHOOK: Output: default@decimal_udf2 +POSTHOOK: query: INSERT OVERWRITE TABLE DECIMAL_UDF2 SELECT * FROM DECIMAL_UDF2_txt +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_udf2_txt +POSTHOOK: Output: default@decimal_udf2 +POSTHOOK: Lineage: decimal_udf2.key SIMPLE [(decimal_udf2_txt)decimal_udf2_txt.FieldSchema(name:key, type:decimal(20,10), comment:null), ] +POSTHOOK: Lineage: decimal_udf2.value SIMPLE [(decimal_udf2_txt)decimal_udf2_txt.FieldSchema(name:value, type:int, comment:null), ] +PREHOOK: query: EXPLAIN +SELECT acos(key), asin(key), atan(key), cos(key), sin(key), tan(key), 
radians(key) +FROM DECIMAL_UDF2 WHERE key = 10 +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN +SELECT acos(key), asin(key), atan(key), cos(key), sin(key), tan(key), radians(key) +FROM DECIMAL_UDF2 WHERE key = 10 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: decimal_udf2 + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (key = 10) (type: boolean) + Statistics: Num rows: 19 Data size: 2148 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: acos(key) (type: double), asin(key) (type: double), atan(key) (type: double), cos(key) (type: double), sin(key) (type: double), tan(key) (type: double), radians(key) (type: double) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6 + Statistics: Num rows: 19 Data size: 2148 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 19 Data size: 2148 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT acos(key), asin(key), atan(key), cos(key), sin(key), tan(key), radians(key) +FROM DECIMAL_UDF2 WHERE key = 10 +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_udf2 +#### A masked pattern was here #### +POSTHOOK: query: SELECT acos(key), asin(key), atan(key), cos(key), sin(key), tan(key), radians(key) +FROM DECIMAL_UDF2 WHERE key = 10 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_udf2 +#### A masked pattern was here #### +NaN NaN 1.4711276743037347 -0.8390715290764524 -0.5440211108893698 0.6483608274590866 0.17453292519943295 +PREHOOK: query: EXPLAIN +SELECT + exp(key), ln(key), + log(key), log(key, key), log(key, value), log(value, key), + log10(key), sqrt(key) +FROM DECIMAL_UDF2 WHERE key = 10 +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN +SELECT + exp(key), ln(key), + log(key), log(key, key), log(key, value), log(value, key), + log10(key), sqrt(key) +FROM DECIMAL_UDF2 WHERE key = 10 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: decimal_udf2 + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (key = 10) (type: boolean) + Statistics: Num rows: 19 Data size: 2148 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: exp(key) (type: double), ln(key) (type: double), log(key) (type: double), log(key, key) (type: double), log(key, value) (type: double), log(value, key) (type: double), log10(key) (type: double), sqrt(key) (type: double) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7 + Statistics: Num rows: 19 Data size: 2148 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 19 Data size: 2148 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT + exp(key), ln(key), + log(key), log(key, key), log(key, value), log(value, key), + log10(key), sqrt(key) +FROM DECIMAL_UDF2 WHERE key = 10 +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_udf2 +#### A masked pattern was here #### +POSTHOOK: query: SELECT + exp(key), ln(key), + log(key), log(key, key), log(key, value), log(value, key), + log10(key), sqrt(key) +FROM DECIMAL_UDF2 WHERE key = 10 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_udf2 +#### A masked pattern was here #### +22026.465794806718 2.302585092994046 2.302585092994046 1.0 1.0 1.0 1.0 3.1622776601683795 +PREHOOK: query: DROP TABLE IF EXISTS DECIMAL_UDF2_txt +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@decimal_udf2_txt +PREHOOK: Output: default@decimal_udf2_txt +POSTHOOK: query: DROP TABLE IF EXISTS DECIMAL_UDF2_txt +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@decimal_udf2_txt +POSTHOOK: Output: default@decimal_udf2_txt +PREHOOK: query: DROP TABLE IF EXISTS DECIMAL_UDF2 +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@decimal_udf2 +PREHOOK: Output: default@decimal_udf2 +POSTHOOK: query: DROP TABLE IF EXISTS DECIMAL_UDF2 +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@decimal_udf2 +POSTHOOK: Output: default@decimal_udf2 Index: ql/src/test/results/clientpositive/vector_mapjoin_reduce.q.out =================================================================== --- ql/src/test/results/clientpositive/vector_mapjoin_reduce.q.out (revision 1637277) +++ ql/src/test/results/clientpositive/vector_mapjoin_reduce.q.out (working copy) @@ -1,104 +1,6 @@ PREHOOK: query: -- SORT_QUERY_RESULTS -DROP TABLE part -PREHOOK: type: DROPTABLE -POSTHOOK: query: -- SORT_QUERY_RESULTS - -DROP TABLE part -POSTHOOK: type: DROPTABLE -PREHOOK: query: -- data setup -CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@part -POSTHOOK: query: -- data setup -CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@part -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part -PREHOOK: type: LOAD -#### A masked pattern was here #### -PREHOOK: Output: default@part -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part -POSTHOOK: type: LOAD -#### A masked pattern was here #### -POSTHOOK: Output: default@part -PREHOOK: query: DROP TABLE lineitem -PREHOOK: type: DROPTABLE -POSTHOOK: query: DROP TABLE lineitem -POSTHOOK: type: DROPTABLE -PREHOOK: query: CREATE TABLE lineitem (L_ORDERKEY INT, - L_PARTKEY INT, - L_SUPPKEY INT, - L_LINENUMBER INT, - L_QUANTITY DOUBLE, - L_EXTENDEDPRICE DOUBLE, - L_DISCOUNT DOUBLE, - L_TAX DOUBLE, - L_RETURNFLAG STRING, - L_LINESTATUS STRING, - l_shipdate STRING, - L_COMMITDATE STRING, - L_RECEIPTDATE STRING, - L_SHIPINSTRUCT STRING, - L_SHIPMODE STRING, - L_COMMENT STRING) -ROW FORMAT DELIMITED -FIELDS TERMINATED BY '|' -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@lineitem -POSTHOOK: 
query: CREATE TABLE lineitem (L_ORDERKEY INT, - L_PARTKEY INT, - L_SUPPKEY INT, - L_LINENUMBER INT, - L_QUANTITY DOUBLE, - L_EXTENDEDPRICE DOUBLE, - L_DISCOUNT DOUBLE, - L_TAX DOUBLE, - L_RETURNFLAG STRING, - L_LINESTATUS STRING, - l_shipdate STRING, - L_COMMITDATE STRING, - L_RECEIPTDATE STRING, - L_SHIPINSTRUCT STRING, - L_SHIPMODE STRING, - L_COMMENT STRING) -ROW FORMAT DELIMITED -FIELDS TERMINATED BY '|' -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@lineitem -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/lineitem.txt' OVERWRITE INTO TABLE lineitem -PREHOOK: type: LOAD -#### A masked pattern was here #### -PREHOOK: Output: default@lineitem -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/lineitem.txt' OVERWRITE INTO TABLE lineitem -POSTHOOK: type: LOAD -#### A masked pattern was here #### -POSTHOOK: Output: default@lineitem -PREHOOK: query: -- Verify HIVE-8097 with a query that has a Vectorized MapJoin in the Reducer. +-- Verify HIVE-8097 with a query that has a Vectorized MapJoin in the Reducer. -- Query copied from subquery_in.q -- non agg, non corr, with join in Parent Query @@ -108,7 +10,9 @@ where li.l_linenumber = 1 and li.l_orderkey in (select l_orderkey from lineitem where l_shipmode = 'AIR') PREHOOK: type: QUERY -POSTHOOK: query: -- Verify HIVE-8097 with a query that has a Vectorized MapJoin in the Reducer. +POSTHOOK: query: -- SORT_QUERY_RESULTS + +-- Verify HIVE-8097 with a query that has a Vectorized MapJoin in the Reducer. -- Query copied from subquery_in.q -- non agg, non corr, with join in Parent Query @@ -130,34 +34,34 @@ Map Operator Tree: TableScan alias: lineitem - Statistics: Num rows: 3024 Data size: 12099 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 100 Data size: 11999 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: l_partkey is not null (type: boolean) - Statistics: Num rows: 1512 Data size: 6049 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 50 Data size: 5999 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: l_partkey (type: int) outputColumnNames: l_partkey - Statistics: Num rows: 1512 Data size: 6049 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 50 Data size: 5999 Basic stats: COMPLETE Column stats: NONE Group By Operator keys: l_partkey (type: int) mode: hash outputColumnNames: _col0 - Statistics: Num rows: 1512 Data size: 6049 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 50 Data size: 5999 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: int) sort order: + Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 1512 Data size: 6049 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 50 Data size: 5999 Basic stats: COMPLETE Column stats: NONE Reduce Operator Tree: Group By Operator keys: KEY._col0 (type: int) mode: mergepartial outputColumnNames: _col0 - Statistics: Num rows: 756 Data size: 3024 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 25 Data size: 2999 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: int) outputColumnNames: _col0 - Statistics: Num rows: 756 Data size: 3024 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 25 Data size: 2999 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false table: @@ -178,10 +82,10 @@ li TableScan alias: li - Statistics: Num rows: 756 Data size: 12099 Basic stats: 
COMPLETE Column stats: NONE + Statistics: Num rows: 100 Data size: 11999 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: ((l_partkey is not null and l_orderkey is not null) and (l_linenumber = 1)) (type: boolean) - Statistics: Num rows: 94 Data size: 1504 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 12 Data size: 1439 Basic stats: COMPLETE Column stats: NONE HashTable Sink Operator condition expressions: 0 {_col0} @@ -192,19 +96,19 @@ sq_1:lineitem TableScan alias: lineitem - Statistics: Num rows: 1728 Data size: 12099 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 100 Data size: 11999 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: ((l_shipmode = 'AIR') and l_orderkey is not null) (type: boolean) - Statistics: Num rows: 432 Data size: 3024 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 25 Data size: 2999 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: l_orderkey (type: int) outputColumnNames: _col0 - Statistics: Num rows: 432 Data size: 3024 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 25 Data size: 2999 Basic stats: COMPLETE Column stats: NONE Group By Operator keys: _col0 (type: int) mode: hash outputColumnNames: _col0 - Statistics: Num rows: 432 Data size: 3024 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 25 Data size: 2999 Basic stats: COMPLETE Column stats: NONE HashTable Sink Operator condition expressions: 0 {_col0} {_col3} @@ -227,7 +131,7 @@ 0 _col0 (type: int) 1 l_partkey (type: int) outputColumnNames: _col0, _col1, _col3 - Statistics: Num rows: 831 Data size: 3326 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 27 Data size: 3298 Basic stats: COMPLETE Column stats: NONE Map Join Operator condition map: Left Semi Join 0 to 1 @@ -238,14 +142,14 @@ 0 _col1 (type: int) 1 _col0 (type: int) outputColumnNames: _col0, _col3 - Statistics: Num rows: 914 Data size: 3658 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 29 Data size: 3627 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: int), _col3 (type: int) outputColumnNames: _col0, _col1 - Statistics: Num rows: 914 Data size: 3658 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 29 Data size: 3627 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 914 Data size: 3658 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 29 Data size: 3627 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -309,34 +213,34 @@ Map Operator Tree: TableScan alias: lineitem - Statistics: Num rows: 3024 Data size: 12099 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 100 Data size: 11999 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: l_partkey is not null (type: boolean) - Statistics: Num rows: 1512 Data size: 6049 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 50 Data size: 5999 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: l_partkey (type: int) outputColumnNames: l_partkey - Statistics: Num rows: 1512 Data size: 6049 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 50 Data size: 5999 Basic stats: COMPLETE Column stats: NONE Group By Operator keys: l_partkey (type: int) mode: hash outputColumnNames: _col0 - Statistics: Num rows: 1512 Data 
size: 6049 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 50 Data size: 5999 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: int) sort order: + Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 1512 Data size: 6049 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 50 Data size: 5999 Basic stats: COMPLETE Column stats: NONE Reduce Operator Tree: Group By Operator keys: KEY._col0 (type: int) mode: mergepartial outputColumnNames: _col0 - Statistics: Num rows: 756 Data size: 3024 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 25 Data size: 2999 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: int) outputColumnNames: _col0 - Statistics: Num rows: 756 Data size: 3024 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 25 Data size: 2999 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false table: @@ -357,10 +261,10 @@ li TableScan alias: li - Statistics: Num rows: 756 Data size: 12099 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 100 Data size: 11999 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: (((l_partkey is not null and l_orderkey is not null) and l_linenumber is not null) and (l_linenumber = 1)) (type: boolean) - Statistics: Num rows: 47 Data size: 752 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 6 Data size: 719 Basic stats: COMPLETE Column stats: NONE HashTable Sink Operator condition expressions: 0 {_col0} @@ -371,19 +275,19 @@ sq_1:lineitem TableScan alias: lineitem - Statistics: Num rows: 1099 Data size: 12099 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 100 Data size: 11999 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (((l_shipmode = 'AIR') and l_orderkey is not null) and l_linenumber is not null) (type: boolean) - Statistics: Num rows: 138 Data size: 1519 Basic stats: COMPLETE Column stats: NONE + predicate: ((((l_shipmode = 'AIR') and l_orderkey is not null) and l_linenumber is not null) and (l_linenumber = 1)) (type: boolean) + Statistics: Num rows: 6 Data size: 719 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: l_orderkey (type: int), l_linenumber (type: int) + expressions: l_orderkey (type: int), 1 (type: int) outputColumnNames: _col0, _col1 - Statistics: Num rows: 138 Data size: 1519 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 6 Data size: 719 Basic stats: COMPLETE Column stats: NONE Group By Operator keys: _col0 (type: int), _col1 (type: int) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 138 Data size: 1519 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 6 Data size: 719 Basic stats: COMPLETE Column stats: NONE HashTable Sink Operator condition expressions: 0 {_col0} {_col3} @@ -406,7 +310,7 @@ 0 _col0 (type: int) 1 l_partkey (type: int) outputColumnNames: _col0, _col1, _col3 - Statistics: Num rows: 831 Data size: 3326 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 27 Data size: 3298 Basic stats: COMPLETE Column stats: NONE Map Join Operator condition map: Left Semi Join 0 to 1 @@ -417,14 +321,14 @@ 0 _col1 (type: int), 1 (type: int) 1 _col0 (type: int), _col1 (type: int) outputColumnNames: _col0, _col3 - Statistics: Num rows: 914 Data size: 3658 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 29 Data size: 3627 Basic stats: COMPLETE Column stats: NONE Select Operator 
expressions: _col0 (type: int), _col3 (type: int) outputColumnNames: _col0, _col1 - Statistics: Num rows: 914 Data size: 3658 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 29 Data size: 3627 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 914 Data size: 3658 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 29 Data size: 3627 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat Index: ql/src/test/results/clientpositive/vector_multi_insert.q.out =================================================================== --- ql/src/test/results/clientpositive/vector_multi_insert.q.out (revision 0) +++ ql/src/test/results/clientpositive/vector_multi_insert.q.out (working copy) @@ -0,0 +1,350 @@ +PREHOOK: query: create table orc1 + stored as orc + tblproperties("orc.compress"="ZLIB") + as + select rn + from + ( + select cast(1 as int) as rn from src limit 1 + union all + select cast(100 as int) as rn from src limit 1 + union all + select cast(10000 as int) as rn from src limit 1 + ) t +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@orc1 +POSTHOOK: query: create table orc1 + stored as orc + tblproperties("orc.compress"="ZLIB") + as + select rn + from + ( + select cast(1 as int) as rn from src limit 1 + union all + select cast(100 as int) as rn from src limit 1 + union all + select cast(10000 as int) as rn from src limit 1 + ) t +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: default@src +POSTHOOK: Output: database:default +POSTHOOK: Output: default@orc1 +PREHOOK: query: create table orc_rn1 (rn int) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@orc_rn1 +POSTHOOK: query: create table orc_rn1 (rn int) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@orc_rn1 +PREHOOK: query: create table orc_rn2 (rn int) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@orc_rn2 +POSTHOOK: query: create table orc_rn2 (rn int) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@orc_rn2 +PREHOOK: query: create table orc_rn3 (rn int) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@orc_rn3 +POSTHOOK: query: create table orc_rn3 (rn int) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@orc_rn3 +PREHOOK: query: explain from orc1 a +insert overwrite table orc_rn1 select a.* where a.rn < 100 +insert overwrite table orc_rn2 select a.* where a.rn >= 100 and a.rn < 1000 +insert overwrite table orc_rn3 select a.* where a.rn >= 1000 +PREHOOK: type: QUERY +POSTHOOK: query: explain from orc1 a +insert overwrite table orc_rn1 select a.* where a.rn < 100 +insert overwrite table orc_rn2 select a.* where a.rn >= 100 and a.rn < 1000 +insert overwrite table orc_rn3 select a.* where a.rn >= 1000 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-3 is a root stage + Stage-9 depends on stages: Stage-3 , consists of Stage-6, Stage-5, Stage-7 + Stage-6 + Stage-0 depends on stages: Stage-6, Stage-5, Stage-8 + Stage-4 depends on stages: Stage-0 + Stage-5 + Stage-7 + Stage-8 depends on stages: Stage-7 + Stage-15 depends on stages: Stage-3 , consists of Stage-12, Stage-11, Stage-13 + Stage-12 + Stage-1 depends 
on stages: Stage-12, Stage-11, Stage-14 + Stage-10 depends on stages: Stage-1 + Stage-11 + Stage-13 + Stage-14 depends on stages: Stage-13 + Stage-21 depends on stages: Stage-3 , consists of Stage-18, Stage-17, Stage-19 + Stage-18 + Stage-2 depends on stages: Stage-18, Stage-17, Stage-20 + Stage-16 depends on stages: Stage-2 + Stage-17 + Stage-19 + Stage-20 depends on stages: Stage-19 + +STAGE PLANS: + Stage: Stage-3 + Map Reduce + Map Operator Tree: + TableScan + alias: a + Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (rn < 100) (type: boolean) + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: rn (type: int) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.orc_rn1 + Filter Operator + predicate: ((rn >= 100) and (rn < 1000)) (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Select Operator + expressions: rn (type: int) + outputColumnNames: _col0 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.orc_rn2 + Filter Operator + predicate: (rn >= 1000) (type: boolean) + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: rn (type: int) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.orc_rn3 + Execution mode: vectorized + + Stage: Stage-9 + Conditional Operator + + Stage: Stage-6 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + + Stage: Stage-0 + Move Operator + tables: + replace: true + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.orc_rn1 + + Stage: Stage-4 + Stats-Aggr Operator + + Stage: Stage-5 + Map Reduce + Map Operator Tree: + TableScan + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.orc_rn1 + + Stage: Stage-7 + Map Reduce + Map Operator Tree: + TableScan + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.orc_rn1 + + Stage: Stage-8 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + + Stage: Stage-15 + Conditional Operator + + Stage: Stage-12 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + + Stage: Stage-1 + Move Operator + tables: + replace: true + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.orc_rn2 + + Stage: Stage-10 + Stats-Aggr Operator + + Stage: Stage-11 + Map Reduce + Map Operator Tree: + TableScan + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.orc_rn2 + + Stage: Stage-13 + Map Reduce + Map Operator Tree: + TableScan + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.orc_rn2 + + Stage: Stage-14 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + + Stage: Stage-21 + Conditional Operator + + Stage: Stage-18 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + + Stage: Stage-2 + Move Operator + tables: + replace: true + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.orc_rn3 + + Stage: Stage-16 + Stats-Aggr Operator + + Stage: Stage-17 + Map Reduce + Map Operator Tree: + TableScan + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.orc_rn3 + + Stage: Stage-19 + Map Reduce + Map Operator Tree: + TableScan + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.orc_rn3 + + Stage: Stage-20 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + +PREHOOK: query: from orc1 a +insert overwrite table orc_rn1 select a.* where a.rn < 100 +insert overwrite table orc_rn2 select a.* where a.rn >= 100 and a.rn < 1000 +insert overwrite table orc_rn3 select a.* where a.rn >= 1000 +PREHOOK: type: QUERY +PREHOOK: Input: default@orc1 +PREHOOK: Output: default@orc_rn1 +PREHOOK: Output: default@orc_rn2 +PREHOOK: Output: default@orc_rn3 +POSTHOOK: query: from orc1 a +insert overwrite table orc_rn1 select a.* where a.rn < 100 +insert overwrite table orc_rn2 select a.* where a.rn >= 100 and a.rn < 1000 +insert overwrite table orc_rn3 select a.* where a.rn >= 1000 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@orc1 +POSTHOOK: Output: default@orc_rn1 +POSTHOOK: Output: default@orc_rn2 +POSTHOOK: Output: default@orc_rn3 +POSTHOOK: Lineage: orc_rn1.rn 
SIMPLE [(orc1)a.FieldSchema(name:rn, type:int, comment:null), ] +POSTHOOK: Lineage: orc_rn2.rn SIMPLE [(orc1)a.FieldSchema(name:rn, type:int, comment:null), ] +POSTHOOK: Lineage: orc_rn3.rn SIMPLE [(orc1)a.FieldSchema(name:rn, type:int, comment:null), ] +PREHOOK: query: select * from orc_rn1 +PREHOOK: type: QUERY +PREHOOK: Input: default@orc_rn1 +#### A masked pattern was here #### +POSTHOOK: query: select * from orc_rn1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@orc_rn1 +#### A masked pattern was here #### +1 +PREHOOK: query: select * from orc_rn2 +PREHOOK: type: QUERY +PREHOOK: Input: default@orc_rn2 +#### A masked pattern was here #### +POSTHOOK: query: select * from orc_rn2 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@orc_rn2 +#### A masked pattern was here #### +100 +PREHOOK: query: select * from orc_rn3 +PREHOOK: type: QUERY +PREHOOK: Input: default@orc_rn3 +#### A masked pattern was here #### +POSTHOOK: query: select * from orc_rn3 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@orc_rn3 +#### A masked pattern was here #### +10000 Index: ql/src/test/results/clientpositive/vectorized_ptf.q.out =================================================================== --- ql/src/test/results/clientpositive/vectorized_ptf.q.out (revision 1637277) +++ ql/src/test/results/clientpositive/vectorized_ptf.q.out (working copy) @@ -2,9 +2,9 @@ PREHOOK: type: DROPTABLE POSTHOOK: query: DROP TABLE part_staging POSTHOOK: type: DROPTABLE -PREHOOK: query: DROP TABLE part +PREHOOK: query: DROP TABLE part_orc PREHOOK: type: DROPTABLE -POSTHOOK: query: DROP TABLE part +POSTHOOK: query: DROP TABLE part_orc POSTHOOK: type: DROPTABLE PREHOOK: query: -- NOTE: This test is a copy of ptf. -- NOTE: We cannot vectorize "pure" table functions (e.g. NOOP) -- given their blackbox nature. 
So only queries without table functions and @@ -52,7 +52,7 @@ POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@part_staging -PREHOOK: query: CREATE TABLE part( +PREHOOK: query: CREATE TABLE part_orc( p_partkey INT, p_name STRING, p_mfgr STRING, @@ -65,8 +65,8 @@ ) STORED AS ORC PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@part -POSTHOOK: query: CREATE TABLE part( +PREHOOK: Output: default@part_orc +POSTHOOK: query: CREATE TABLE part_orc( p_partkey INT, p_name STRING, p_mfgr STRING, @@ -79,13 +79,13 @@ ) STORED AS ORC POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@part -PREHOOK: query: DESCRIBE EXTENDED part +POSTHOOK: Output: default@part_orc +PREHOOK: query: DESCRIBE EXTENDED part_orc PREHOOK: type: DESCTABLE -PREHOOK: Input: default@part -POSTHOOK: query: DESCRIBE EXTENDED part +PREHOOK: Input: default@part_orc +POSTHOOK: query: DESCRIBE EXTENDED part_orc POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@part +POSTHOOK: Input: default@part_orc p_partkey int p_name string p_mfgr string @@ -97,23 +97,23 @@ p_comment string #### A masked pattern was here #### -PREHOOK: query: insert into table part select * from part_staging +PREHOOK: query: insert into table part_orc select * from part_staging PREHOOK: type: QUERY PREHOOK: Input: default@part_staging -PREHOOK: Output: default@part -POSTHOOK: query: insert into table part select * from part_staging +PREHOOK: Output: default@part_orc +POSTHOOK: query: insert into table part_orc select * from part_staging POSTHOOK: type: QUERY POSTHOOK: Input: default@part_staging -POSTHOOK: Output: default@part -POSTHOOK: Lineage: part.p_brand SIMPLE [(part_staging)part_staging.FieldSchema(name:p_brand, type:string, comment:null), ] -POSTHOOK: Lineage: part.p_comment SIMPLE [(part_staging)part_staging.FieldSchema(name:p_comment, type:string, comment:null), ] -POSTHOOK: Lineage: part.p_container SIMPLE [(part_staging)part_staging.FieldSchema(name:p_container, type:string, comment:null), ] -POSTHOOK: Lineage: part.p_mfgr SIMPLE [(part_staging)part_staging.FieldSchema(name:p_mfgr, type:string, comment:null), ] -POSTHOOK: Lineage: part.p_name SIMPLE [(part_staging)part_staging.FieldSchema(name:p_name, type:string, comment:null), ] -POSTHOOK: Lineage: part.p_partkey SIMPLE [(part_staging)part_staging.FieldSchema(name:p_partkey, type:int, comment:null), ] -POSTHOOK: Lineage: part.p_retailprice SIMPLE [(part_staging)part_staging.FieldSchema(name:p_retailprice, type:double, comment:null), ] -POSTHOOK: Lineage: part.p_size SIMPLE [(part_staging)part_staging.FieldSchema(name:p_size, type:int, comment:null), ] -POSTHOOK: Lineage: part.p_type SIMPLE [(part_staging)part_staging.FieldSchema(name:p_type, type:string, comment:null), ] +POSTHOOK: Output: default@part_orc +POSTHOOK: Lineage: part_orc.p_brand SIMPLE [(part_staging)part_staging.FieldSchema(name:p_brand, type:string, comment:null), ] +POSTHOOK: Lineage: part_orc.p_comment SIMPLE [(part_staging)part_staging.FieldSchema(name:p_comment, type:string, comment:null), ] +POSTHOOK: Lineage: part_orc.p_container SIMPLE [(part_staging)part_staging.FieldSchema(name:p_container, type:string, comment:null), ] +POSTHOOK: Lineage: part_orc.p_mfgr SIMPLE [(part_staging)part_staging.FieldSchema(name:p_mfgr, type:string, comment:null), ] +POSTHOOK: Lineage: part_orc.p_name SIMPLE [(part_staging)part_staging.FieldSchema(name:p_name, type:string, comment:null), ] +POSTHOOK: Lineage: part_orc.p_partkey SIMPLE 
[(part_staging)part_staging.FieldSchema(name:p_partkey, type:int, comment:null), ]
+POSTHOOK: Lineage: part_orc.p_retailprice SIMPLE [(part_staging)part_staging.FieldSchema(name:p_retailprice, type:double, comment:null), ]
+POSTHOOK: Lineage: part_orc.p_size SIMPLE [(part_staging)part_staging.FieldSchema(name:p_size, type:int, comment:null), ]
+POSTHOOK: Lineage: part_orc.p_type SIMPLE [(part_staging)part_staging.FieldSchema(name:p_type, type:string, comment:null), ]
 PREHOOK: query: --1. test1
 explain extended
@@ -121,7 +121,7 @@
 rank() over (partition by p_mfgr order by p_name) as r,
 dense_rank() over (partition by p_mfgr order by p_name) as dr,
 sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row) as s1
-from noop(on part
+from noop(on part_orc
 partition by p_mfgr
 order by p_name
 )
@@ -133,7 +133,7 @@
 rank() over (partition by p_mfgr order by p_name) as r,
 dense_rank() over (partition by p_mfgr order by p_name) as dr,
 sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row) as s1
-from noop(on part
+from noop(on part_orc
 partition by p_mfgr
 order by p_name
 )
@@ -146,7 +146,7 @@
 noop
 TOK_TABREF
 TOK_TABNAME
- part
+ part_orc
 TOK_PARTITIONINGSPEC
 TOK_DISTRIBUTEBY
 TOK_TABLE_OR_COL
@@ -226,7 +226,7 @@
 Map Reduce
 Map Operator Tree:
 TableScan
- alias: part
+ alias: part_orc
 Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE
 GatherStats: false
 Reduce Output Operator
@@ -242,7 +242,7 @@
 Path -> Partition:
 #### A masked pattern was here ####
 Partition
- base file name: part
+ base file name: part_orc
 input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
 output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
 properties:
@@ -252,11 +252,11 @@
 columns.comments
 columns.types int:string:string:string:string:int:string:double:string
 #### A masked pattern was here ####
- name default.part
+ name default.part_orc
 numFiles 1
 numRows 26
 rawDataSize 16042
- serialization.ddl struct part { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment}
+ serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment}
 serialization.format 1
 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
 totalSize 2597
@@ -272,20 +272,20 @@
 columns.comments
 columns.types int:string:string:string:string:int:string:double:string
 #### A masked pattern was here ####
- name default.part
+ name default.part_orc
 numFiles 1
 numRows 26
 rawDataSize 16042
- serialization.ddl struct part { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment}
+ serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment}
 serialization.format 1
 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
 totalSize 2597
 #### A masked pattern was here ####
 serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
- name: default.part
- name: default.part
+ name: default.part_orc
+ name: default.part_orc
 Truncated Path -> Alias:
- /part [part]
+ /part_orc [part_orc]
 Needs Tagging: false
 Reduce Operator Tree:
 Extract
@@ -394,23 +394,23 @@
 rank() over (partition by p_mfgr order by p_name) as r,
 dense_rank() over (partition by p_mfgr order by p_name) as dr,
 sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row) as s1
-from noop(on part
+from noop(on part_orc
 partition by p_mfgr
 order by p_name
 )
 PREHOOK: type: QUERY
-PREHOOK: Input: default@part
+PREHOOK: Input: default@part_orc
 #### A masked pattern was here ####
 POSTHOOK: query: select p_mfgr, p_name, p_size,
 rank() over (partition by p_mfgr order by p_name) as r,
 dense_rank() over (partition by p_mfgr order by p_name) as dr,
 sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row) as s1
-from noop(on part
+from noop(on part_orc
 partition by p_mfgr
 order by p_name
 )
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@part
+POSTHOOK: Input: default@part_orc
 #### A masked pattern was here ####
 Manufacturer#1 almond antique burnished rose metallic 2 1 1 1173.15
 Manufacturer#1 almond antique burnished rose metallic 2 1 1 2346.3
@@ -443,7 +443,7 @@
 explain extended
 select p_mfgr, p_name, p_size,
 p_size - lag(p_size,1,p_size) over (partition by p_mfgr order by p_name) as deltaSz
-from noop (on (select p1.* from part p1 join part p2 on p1.p_partkey = p2.p_partkey) j
+from noop (on (select p1.* from part_orc p1 join part_orc p2 on p1.p_partkey = p2.p_partkey) j
 distribute by j.p_mfgr
 sort by j.p_name)
 PREHOOK: type: QUERY
@@ -452,7 +452,7 @@
 explain extended
 select p_mfgr, p_name, p_size,
 p_size - lag(p_size,1,p_size) over (partition by p_mfgr order by p_name) as deltaSz
-from noop (on (select p1.* from part p1 join part p2 on p1.p_partkey = p2.p_partkey) j
+from noop (on (select p1.* from part_orc p1 join part_orc p2 on p1.p_partkey = p2.p_partkey) j
 distribute by j.p_mfgr
 sort by j.p_name)
 POSTHOOK: type: QUERY
@@ -468,11 +468,11 @@
 TOK_JOIN
 TOK_TABREF
 TOK_TABNAME
- part
+ part_orc
 p1
 TOK_TABREF
 TOK_TABNAME
- part
+ part_orc
 p2
 =
 .
@@ -588,7 +588,7 @@
 Path -> Partition:
 #### A masked pattern was here ####
 Partition
- base file name: part
+ base file name: part_orc
 input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
 output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
 properties:
@@ -598,11 +598,11 @@
 columns.comments
 columns.types int:string:string:string:string:int:string:double:string
 #### A masked pattern was here ####
- name default.part
+ name default.part_orc
 numFiles 1
 numRows 26
 rawDataSize 16042
- serialization.ddl struct part { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment}
+ serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment}
 serialization.format 1
 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
 totalSize 2597
@@ -618,20 +618,20 @@
 columns.comments
 columns.types int:string:string:string:string:int:string:double:string
 #### A masked pattern was here ####
- name default.part
+ name default.part_orc
 numFiles 1
 numRows 26
 rawDataSize 16042
- serialization.ddl struct part { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment}
+ serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment}
 serialization.format 1
 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
 totalSize 2597
 #### A masked pattern was here ####
 serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
- name: default.part
- name: default.part
+ name: default.part_orc
+ name: default.part_orc
 Truncated Path -> Alias:
- /part [j:p1, j:p2]
+ /part_orc [j:p1, j:p2]
 Execution mode: vectorized
 Needs Tagging: true
 Reduce Operator Tree:
@@ -809,19 +809,19 @@
 PREHOOK: query: select p_mfgr, p_name, p_size,
 p_size - lag(p_size,1,p_size) over (partition by p_mfgr order by p_name) as deltaSz
-from noop (on (select p1.* from part p1 join part p2 on p1.p_partkey = p2.p_partkey) j
+from noop (on (select p1.* from part_orc p1 join part_orc p2 on p1.p_partkey = p2.p_partkey) j
 distribute by j.p_mfgr
 sort by j.p_name)
 PREHOOK: type: QUERY
-PREHOOK: Input: default@part
+PREHOOK: Input: default@part_orc
 #### A masked pattern was here ####
 POSTHOOK: query: select p_mfgr, p_name, p_size,
 p_size - lag(p_size,1,p_size) over (partition by p_mfgr order by p_name) as deltaSz
-from noop (on (select p1.* from part p1 join part p2 on p1.p_partkey = p2.p_partkey) j
+from noop (on (select p1.* from part_orc p1 join part_orc p2 on p1.p_partkey = p2.p_partkey) j
 distribute by j.p_mfgr
 sort by j.p_name)
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@part
+POSTHOOK: Input: default@part_orc
 #### A masked pattern was here ####
 Manufacturer#1 almond antique burnished rose metallic 2 0
 Manufacturer#1 almond antique burnished rose metallic 2 0
@@ -855,7 +855,7 @@
 explain extended
 select p_mfgr, p_name, p_size
-from noop(on part
+from noop(on part_orc
 partition by p_mfgr
 order by p_name)
 PREHOOK: type: QUERY
@@ -863,7 +863,7 @@
 explain extended
 select p_mfgr, p_name, p_size
-from noop(on part
+from noop(on part_orc
 partition by p_mfgr
 order by p_name)
 POSTHOOK: type: QUERY
@@ -875,7 +875,7 @@
 noop
 TOK_TABREF
 TOK_TABNAME
- part
+ part_orc
 TOK_PARTITIONINGSPEC
 TOK_DISTRIBUTEBY
 TOK_TABLE_OR_COL
@@ -909,7 +909,7 @@
 Map Reduce
 Map Operator Tree:
 TableScan
- alias: part
+ alias: part_orc
 Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE
 GatherStats: false
 Reduce Output Operator
@@ -925,7 +925,7 @@
 Path -> Partition:
 #### A masked pattern was here ####
 Partition
- base file name: part
+ base file name: part_orc
 input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
 output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
 properties:
@@ -935,11 +935,11 @@
 columns.comments
 columns.types int:string:string:string:string:int:string:double:string
 #### A masked pattern was here ####
- name default.part
+ name default.part_orc
 numFiles 1
 numRows 26
 rawDataSize 16042
- serialization.ddl struct part { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment}
+ serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment}
 serialization.format 1
 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
 totalSize 2597
@@ -955,20 +955,20 @@
 columns.comments
 columns.types int:string:string:string:string:int:string:double:string
 #### A masked pattern was here ####
- name default.part
+ name default.part_orc
 numFiles 1
 numRows 26
 rawDataSize 16042
- serialization.ddl struct part { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment}
+ serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment}
 serialization.format 1
 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
 totalSize 2597
 #### A masked pattern was here ####
 serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
- name: default.part
- name: default.part
+ name: default.part_orc
+ name: default.part_orc
 Truncated Path -> Alias:
- /part [part]
+ /part_orc [part_orc]
 Needs Tagging: false
 Reduce Operator Tree:
 Extract
@@ -1008,18 +1008,18 @@
 ListSink
 PREHOOK: query: select p_mfgr, p_name, p_size
-from noop(on part
+from noop(on part_orc
 partition by p_mfgr
 order by p_name)
 PREHOOK: type: QUERY
-PREHOOK: Input: default@part
+PREHOOK: Input: default@part_orc
 #### A masked pattern was here ####
 POSTHOOK: query: select p_mfgr, p_name, p_size
-from noop(on part
+from noop(on part_orc
 partition by p_mfgr
 order by p_name)
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@part
+POSTHOOK: Input: default@part_orc
 #### A masked pattern was here ####
 Manufacturer#1 almond antique burnished rose metallic 2
 Manufacturer#1 almond antique burnished rose metallic 2
@@ -1054,7 +1054,7 @@
 rank() over (partition by p_mfgr order by p_name) as r,
 dense_rank() over (partition by p_mfgr order by p_name) as dr,
 sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row) as s1
-from noop(on part
+from noop(on part_orc
 partition by p_mfgr
 order by p_name
 ) abc
@@ -1066,7 +1066,7 @@
 rank() over (partition by p_mfgr order by p_name) as r,
 dense_rank() over (partition by p_mfgr order by p_name) as dr,
 sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row) as s1
-from noop(on part
+from noop(on part_orc
 partition by p_mfgr
 order by p_name
 ) abc
@@ -1080,7 +1080,7 @@
 abc
 TOK_TABREF
 TOK_TABNAME
- part
+ part_orc
 TOK_PARTITIONINGSPEC
 TOK_DISTRIBUTEBY
 TOK_TABLE_OR_COL
@@ -1160,7 +1160,7 @@
 Map Reduce
 Map Operator Tree:
 TableScan
- alias: part
+ alias: part_orc
 Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE
 GatherStats: false
 Reduce Output Operator
@@ -1176,7 +1176,7 @@
 Path -> Partition:
 #### A masked pattern was here ####
 Partition
- base file name: part
+ base file name: part_orc
 input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
 output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
 properties:
@@ -1186,11 +1186,11 @@
 columns.comments
 columns.types int:string:string:string:string:int:string:double:string
 #### A masked pattern was here ####
- name default.part
+ name default.part_orc
 numFiles 1
 numRows 26
 rawDataSize 16042
- serialization.ddl struct part { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment}
+ serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment}
 serialization.format 1
 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
 totalSize 2597
@@ -1206,20 +1206,20 @@
 columns.comments
 columns.types int:string:string:string:string:int:string:double:string
 #### A masked pattern was here ####
- name default.part
+ name default.part_orc
 numFiles 1
 numRows 26
 rawDataSize 16042
- serialization.ddl struct part { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment}
+ serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment}
 serialization.format 1
 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
 totalSize 2597
 #### A masked pattern was here ####
 serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
- name: default.part
- name: default.part
+ name: default.part_orc
+ name: default.part_orc
 Truncated Path -> Alias:
- /part [part]
+ /part_orc [part_orc]
 Needs Tagging: false
 Reduce Operator Tree:
 Extract
@@ -1328,23 +1328,23 @@
 rank() over (partition by p_mfgr order by p_name) as r,
 dense_rank() over (partition by p_mfgr order by p_name) as dr,
 sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row) as s1
-from noop(on part
+from noop(on part_orc
 partition by p_mfgr
 order by p_name
 ) abc
 PREHOOK: type: QUERY
-PREHOOK: Input: default@part
+PREHOOK: Input: default@part_orc
 #### A masked pattern was here ####
 POSTHOOK: query: select p_mfgr, p_name, p_size,
 rank() over (partition by p_mfgr order by p_name) as r,
 dense_rank() over (partition by p_mfgr order by p_name) as dr,
 sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row) as s1
-from noop(on part
+from noop(on part_orc
 partition by p_mfgr
 order by p_name
 ) abc
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@part
+POSTHOOK: Input: default@part_orc
 #### A masked pattern was here ####
 Manufacturer#1 almond antique burnished rose metallic 2 1 1 1173.15
 Manufacturer#1 almond antique burnished rose metallic 2 1 1 2346.3
@@ -1379,7 +1379,7 @@
 rank() over (partition by p_mfgr order by p_name) as r,
 dense_rank() over (partition by p_mfgr order by p_name) as dr,
 p_size, p_size - lag(p_size,1,p_size) over (partition by p_mfgr order by p_name) as deltaSz
-from noop(on part
+from noop(on part_orc
 partition by p_mfgr
 order by p_name
 )
@@ -1391,7 +1391,7 @@
 rank() over (partition by p_mfgr order by p_name) as r,
 dense_rank() over (partition by p_mfgr order by p_name) as dr,
 p_size, p_size - lag(p_size,1,p_size) over (partition by p_mfgr order by p_name) as deltaSz
-from noop(on part
+from noop(on part_orc
 partition by p_mfgr
 order by p_name
 )
@@ -1404,7 +1404,7 @@
 noop
 TOK_TABREF
 TOK_TABNAME
- part
+ part_orc
 TOK_PARTITIONINGSPEC
 TOK_DISTRIBUTEBY
 TOK_TABLE_OR_COL
@@ -1489,7 +1489,7 @@
 Map Reduce
 Map Operator Tree:
 TableScan
- alias: part
+ alias: part_orc
 Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE
 GatherStats: false
 Reduce Output Operator
@@ -1505,7 +1505,7 @@
 Path -> Partition:
 #### A masked pattern was here ####
 Partition
- base file name: part
+ base file name: part_orc
 input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
 output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
 properties:
@@ -1515,11 +1515,11 @@
 columns.comments
 columns.types int:string:string:string:string:int:string:double:string
 #### A masked pattern was here ####
- name default.part
+ name default.part_orc
 numFiles 1
 numRows 26
 rawDataSize 16042
- serialization.ddl struct part { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment}
+ serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment}
 serialization.format 1
 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
 totalSize 2597
@@ -1535,20 +1535,20 @@
 columns.comments
 columns.types int:string:string:string:string:int:string:double:string
 #### A masked pattern was here ####
- name default.part
+ name default.part_orc
 numFiles 1
 numRows 26
 rawDataSize 16042
- serialization.ddl struct part { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment}
+ serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment}
 serialization.format 1
 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
 totalSize 2597
 #### A masked pattern was here ####
 serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
- name: default.part
- name: default.part
+ name: default.part_orc
+ name: default.part_orc
 Truncated Path -> Alias:
- /part [part]
+ /part_orc [part_orc]
 Needs Tagging: false
 Reduce Operator Tree:
 Extract
@@ -1657,23 +1657,23 @@
 rank() over (partition by p_mfgr order by p_name) as r,
 dense_rank() over (partition by p_mfgr order by p_name) as dr,
 p_size, p_size - lag(p_size,1,p_size) over (partition by p_mfgr order by p_name) as deltaSz
-from noop(on part
+from noop(on part_orc
 partition by p_mfgr
 order by p_name
 )
 PREHOOK: type: QUERY
-PREHOOK: Input: default@part
+PREHOOK: Input: default@part_orc
 #### A masked pattern was here ####
 POSTHOOK: query: select p_mfgr, p_name, p_size,
 rank() over (partition by p_mfgr order by p_name) as r,
 dense_rank() over (partition by p_mfgr order by p_name) as dr,
 p_size, p_size - lag(p_size,1,p_size) over (partition by p_mfgr order by p_name) as deltaSz
-from noop(on part
+from noop(on part_orc
 partition by p_mfgr
 order by p_name
 )
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@part
+POSTHOOK: Input: default@part_orc
 #### A masked pattern was here ####
 Manufacturer#1 almond antique burnished rose metallic 2 1 1 2 0
 Manufacturer#1 almond antique burnished rose metallic 2 1 1 2 0
@@ -1708,7 +1708,7 @@
 rank() over (partition by p_mfgr order by p_name) as r,
 dense_rank() over (partition by p_mfgr order by p_name) as dr,
 p_size, p_size - lag(p_size,1,p_size) over (partition by p_mfgr order by p_name) as deltaSz
-from noop(on part
+from noop(on part_orc
 partition by p_mfgr
 order by p_name
 )
@@ -1721,7 +1721,7 @@
 rank() over (partition by p_mfgr order by p_name) as r,
 dense_rank() over (partition by p_mfgr order by p_name) as dr,
 p_size, p_size - lag(p_size,1,p_size) over (partition by p_mfgr order by p_name) as deltaSz
-from noop(on part
+from noop(on part_orc
 partition by p_mfgr
 order by p_name
 )
@@ -1735,7 +1735,7 @@
 noop
 TOK_TABREF
 TOK_TABNAME
- part
+ part_orc
 TOK_PARTITIONINGSPEC
 TOK_DISTRIBUTEBY
 TOK_TABLE_OR_COL
@@ -1828,7 +1828,7 @@
 Map Reduce
 Map Operator Tree:
 TableScan
- alias: part
+ alias: part_orc
 Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE
 GatherStats: false
 Reduce Output Operator
@@ -1844,7 +1844,7 @@
 Path -> Partition:
 #### A masked pattern was here ####
 Partition
- base file name: part
+ base file name: part_orc
 input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
 output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
 properties:
@@ -1854,11 +1854,11 @@
 columns.comments
 columns.types int:string:string:string:string:int:string:double:string
 #### A masked pattern was here ####
- name default.part
+ name default.part_orc
 numFiles 1
 numRows 26
 rawDataSize 16042
- serialization.ddl struct part { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment}
+ serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment}
 serialization.format 1
 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
 totalSize 2597
@@ -1874,20 +1874,20 @@
 columns.comments
 columns.types int:string:string:string:string:int:string:double:string
 #### A masked pattern was here ####
- name default.part
+ name default.part_orc
 numFiles 1
 numRows 26
 rawDataSize 16042
- serialization.ddl struct part { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment}
+ serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment}
 serialization.format 1
 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
 totalSize 2597
 #### A masked pattern was here ####
 serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
- name: default.part
- name: default.part
+ name: default.part_orc
+ name: default.part_orc
 Truncated Path -> Alias:
- /part [part]
+ /part_orc [part_orc]
 Needs Tagging: false
 Reduce Operator Tree:
 Extract
@@ -2063,25 +2063,25 @@
 rank() over (partition by p_mfgr order by p_name) as r,
 dense_rank() over (partition by p_mfgr order by p_name) as dr,
 p_size, p_size - lag(p_size,1,p_size) over (partition by p_mfgr order by p_name) as deltaSz
-from noop(on part
+from noop(on part_orc
 partition by p_mfgr
 order by p_name
 )
 group by p_mfgr, p_name, p_size
 PREHOOK: type: QUERY
-PREHOOK: Input: default@part
+PREHOOK: Input: default@part_orc
 #### A masked pattern was here ####
 POSTHOOK: query: select p_mfgr, p_name, p_size,
 rank() over (partition by p_mfgr order by p_name) as r,
 dense_rank() over (partition by p_mfgr order by p_name) as dr,
 p_size, p_size - lag(p_size,1,p_size) over (partition by p_mfgr order by p_name) as deltaSz
-from noop(on part
+from noop(on part_orc
 partition by p_mfgr
 order by p_name
 )
 group by p_mfgr, p_name, p_size
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@part
+POSTHOOK: Input: default@part_orc
 #### A masked pattern was here ####
 Manufacturer#1 almond antique burnished rose metallic 2 1 1 2 0
 Manufacturer#1 almond antique chartreuse lavender yellow 34 2 2 34 32
@@ -2112,19 +2112,19 @@
 explain extended
 select abc.*
-from noop(on part
+from noop(on part_orc
 partition by p_mfgr
 order by p_name
-) abc join part p1 on abc.p_partkey = p1.p_partkey
+) abc join part_orc p1 on abc.p_partkey = p1.p_partkey
 PREHOOK: type: QUERY
 POSTHOOK: query: -- 7. testJoin
 explain extended
 select abc.*
-from noop(on part
+from noop(on part_orc
 partition by p_mfgr
 order by p_name
-) abc join part p1 on abc.p_partkey = p1.p_partkey
+) abc join part_orc p1 on abc.p_partkey = p1.p_partkey
 POSTHOOK: type: QUERY
 ABSTRACT SYNTAX TREE:
@@ -2136,7 +2136,7 @@
 abc
 TOK_TABREF
 TOK_TABNAME
- part
+ part_orc
 TOK_PARTITIONINGSPEC
 TOK_DISTRIBUTEBY
 TOK_TABLE_OR_COL
@@ -2147,7 +2147,7 @@
 p_name
 TOK_TABREF
 TOK_TABNAME
- part
+ part_orc
 p1
 =
 .
@@ -2179,7 +2179,7 @@
 Map Reduce
 Map Operator Tree:
 TableScan
- alias: part
+ alias: part_orc
 Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE
 GatherStats: false
 Reduce Output Operator
@@ -2195,7 +2195,7 @@
 Path -> Partition:
 #### A masked pattern was here ####
 Partition
- base file name: part
+ base file name: part_orc
 input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
 output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
 properties:
@@ -2205,11 +2205,11 @@
 columns.comments
 columns.types int:string:string:string:string:int:string:double:string
 #### A masked pattern was here ####
- name default.part
+ name default.part_orc
 numFiles 1
 numRows 26
 rawDataSize 16042
- serialization.ddl struct part { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment}
+ serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment}
 serialization.format 1
 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
 totalSize 2597
@@ -2225,20 +2225,20 @@
 columns.comments
 columns.types int:string:string:string:string:int:string:double:string
 #### A masked pattern was here ####
- name default.part
+ name default.part_orc
 numFiles 1
 numRows 26
 rawDataSize 16042
- serialization.ddl struct part { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment}
+ serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment}
 serialization.format 1
 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
 totalSize 2597
 #### A masked pattern was here ####
 serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
- name: default.part
- name: default.part
+ name: default.part_orc
+ name: default.part_orc
 Truncated Path -> Alias:
- /part [part]
+ /part_orc [part_orc]
 Needs Tagging: false
 Reduce Operator Tree:
 Extract
@@ -2324,7 +2324,7 @@
 serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
 #### A masked pattern was here ####
 Partition
- base file name: part
+ base file name: part_orc
 input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
 output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
 properties:
@@ -2334,11 +2334,11 @@
 columns.comments
 columns.types int:string:string:string:string:int:string:double:string
 #### A masked pattern was here ####
- name default.part
+ name default.part_orc
 numFiles 1
 numRows 26
 rawDataSize 16042
- serialization.ddl struct part { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment}
+ serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment}
 serialization.format 1
 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
 totalSize 2597
@@ -2354,20 +2354,20 @@
 columns.comments
 columns.types int:string:string:string:string:int:string:double:string
 #### A masked pattern was here ####
- name default.part
+ name default.part_orc
 numFiles 1
 numRows 26
 rawDataSize 16042
- serialization.ddl struct part { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment}
+ serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment}
 serialization.format 1
 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
 totalSize 2597
 #### A masked pattern was here ####
 serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
- name: default.part
- name: default.part
+ name: default.part_orc
+ name: default.part_orc
 Truncated Path -> Alias:
- /part [p1]
+ /part_orc [p1]
 #### A masked pattern was here ####
 Needs Tagging: true
 Reduce Operator Tree:
@@ -2412,20 +2412,20 @@
 ListSink
 PREHOOK: query: select abc.*
-from noop(on part
+from noop(on part_orc
 partition by p_mfgr
 order by p_name
-) abc join part p1 on abc.p_partkey = p1.p_partkey
+) abc join part_orc p1 on abc.p_partkey = p1.p_partkey
 PREHOOK: type: QUERY
-PREHOOK: Input: default@part
+PREHOOK: Input: default@part_orc
 #### A masked pattern was here ####
 POSTHOOK: query: select abc.*
-from noop(on part
+from noop(on part_orc
 partition by p_mfgr
 order by p_name
-) abc join part p1 on abc.p_partkey = p1.p_partkey
+) abc join part_orc p1 on abc.p_partkey = p1.p_partkey
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@part
+POSTHOOK: Input: default@part_orc
 #### A masked pattern was here ####
 15103 almond aquamarine dodger light gainsboro Manufacturer#5 Brand#53 ECONOMY BURNISHED STEEL 46 LG PACK 1018.1 packages hinder carefu
 17273 almond antique forest lavender goldenrod Manufacturer#3 Brand#35 PROMO ANODIZED TIN 14 JUMBO CASE 1190.27 along the
@@ -2459,7 +2459,7 @@
 explain extended
 select abc.*
-from part p1 join noop(on part
+from part_orc p1 join noop(on part_orc
 partition by p_mfgr
 order by p_name
 ) abc on abc.p_partkey = p1.p_partkey
@@ -2468,7 +2468,7 @@
 explain extended
 select abc.*
-from part p1 join noop(on part
+from part_orc p1 join noop(on part_orc
 partition by p_mfgr
 order by p_name
 ) abc on abc.p_partkey = p1.p_partkey
@@ -2480,14 +2480,14 @@
 TOK_JOIN
 TOK_TABREF
 TOK_TABNAME
- part
+ part_orc
 p1
 TOK_PTBLFUNCTION
 noop
 abc
 TOK_TABREF
 TOK_TABNAME
- part
+ part_orc
 TOK_PARTITIONINGSPEC
 TOK_DISTRIBUTEBY
 TOK_TABLE_OR_COL
@@ -2526,7 +2526,7 @@
 Map Reduce
 Map Operator Tree:
 TableScan
- alias: part
+ alias: part_orc
 Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE
 GatherStats: false
 Reduce Output Operator
@@ -2542,7 +2542,7 @@
 Path -> Partition:
 #### A masked pattern was here ####
 Partition
- base file name: part
+ base file name: part_orc
 input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
 output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
 properties:
@@ -2552,11 +2552,11 @@
 columns.comments
 columns.types int:string:string:string:string:int:string:double:string
 #### A masked pattern was here ####
- name default.part
+ name default.part_orc
 numFiles 1
 numRows 26
 rawDataSize 16042
- serialization.ddl struct part { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment}
+ serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment}
 serialization.format 1
 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
 totalSize 2597
@@ -2572,20 +2572,20 @@
 columns.comments
 columns.types int:string:string:string:string:int:string:double:string
 #### A masked pattern was here ####
- name default.part
+ name default.part_orc
 numFiles 1
 numRows 26
 rawDataSize 16042
- serialization.ddl struct part { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment}
+ serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment}
 serialization.format 1
 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
 totalSize 2597
 #### A masked pattern was here ####
 serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
- name: default.part
- name: default.part
+ name: default.part_orc
+ name: default.part_orc
 Truncated Path -> Alias:
- /part [part]
+ /part_orc [part_orc]
 Needs Tagging: false
 Reduce Operator Tree:
 Extract
@@ -2671,7 +2671,7 @@
 serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
 #### A masked pattern was here ####
 Partition
- base file name: part
+ base file name: part_orc
 input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
 output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
 properties:
@@ -2681,11 +2681,11 @@
 columns.comments
 columns.types int:string:string:string:string:int:string:double:string
 #### A masked pattern was here ####
- name default.part
+ name default.part_orc
 numFiles 1
 numRows 26
 rawDataSize 16042
- serialization.ddl struct part { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment}
+ serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment}
 serialization.format 1
 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
 totalSize 2597
@@ -2701,20 +2701,20 @@
 columns.comments
 columns.types int:string:string:string:string:int:string:double:string
 #### A masked pattern was here ####
- name default.part
+ name default.part_orc
 numFiles 1
 numRows 26
 rawDataSize 16042
- serialization.ddl struct part { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment}
+ serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment}
 serialization.format 1
 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
 totalSize 2597
 #### A masked pattern was here ####
 serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
- name: default.part
- name: default.part
+ name: default.part_orc
+ name: default.part_orc
 Truncated Path -> Alias:
- /part [p1]
+ /part_orc [p1]
 #### A masked pattern was here ####
 Needs Tagging: true
 Reduce Operator Tree:
@@ -2759,20 +2759,20 @@
 ListSink
 PREHOOK: query: select abc.*
-from part p1 join noop(on part
+from part_orc p1 join noop(on part_orc
 partition by p_mfgr
 order by p_name
 ) abc on abc.p_partkey = p1.p_partkey
 PREHOOK: type: QUERY
-PREHOOK: Input: default@part
+PREHOOK: Input: default@part_orc
 #### A masked pattern was here ####
 POSTHOOK: query: select abc.*
-from part p1 join noop(on part
+from part_orc p1 join noop(on part_orc
 partition by p_mfgr
 order by p_name
 ) abc on abc.p_partkey = p1.p_partkey
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@part
+POSTHOOK: Input: default@part_orc
 #### A masked pattern was here ####
 15103 almond aquamarine dodger light gainsboro Manufacturer#5 Brand#53 ECONOMY BURNISHED STEEL 46 LG PACK 1018.1 packages hinder carefu
 17273 almond antique forest lavender goldenrod Manufacturer#3 Brand#35 PROMO ANODIZED TIN 14 JUMBO CASE 1190.27 along the
@@ -2807,7 +2807,7 @@
 explain extended
 select p_mfgr, p_name, p_size,
 rank() over (partition by p_mfgr order by p_name, p_size desc) as r
-from noopwithmap(on part
+from noopwithmap(on part_orc
 partition by p_mfgr
 order by p_name, p_size desc)
 PREHOOK: type: QUERY
@@ -2816,7 +2816,7 @@
 explain extended
 select p_mfgr, p_name, p_size,
 rank() over (partition by p_mfgr order by p_name, p_size desc) as r
-from noopwithmap(on part
+from noopwithmap(on part_orc
 partition by p_mfgr
 order by p_name, p_size desc)
 POSTHOOK: type: QUERY
@@ -2828,7 +2828,7 @@
 noopwithmap
 TOK_TABREF
 TOK_TABNAME
- part
+ part_orc
 TOK_PARTITIONINGSPEC
 TOK_DISTRIBUTEBY
 TOK_TABLE_OR_COL
@@ -2882,7 +2882,7 @@
 Map Reduce
 Map Operator Tree:
 TableScan
- alias: part
+ alias: part_orc
 Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE
 GatherStats: false
 PTF Operator
@@ -2900,7 +2900,7 @@
 Path -> Partition:
 #### A masked pattern was here ####
 Partition
- base file name: part
+ base file name: part_orc
 input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
 output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
 properties:
@@ -2910,11 +2910,11 @@
 columns.comments
 columns.types int:string:string:string:string:int:string:double:string
 #### A masked pattern was here ####
- name default.part
+ name default.part_orc
 numFiles 1
 numRows 26
 rawDataSize 16042
- serialization.ddl struct part { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment}
+ serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment}
 serialization.format 1
 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
 totalSize 2597
@@ -2930,20 +2930,20 @@
 columns.comments
 columns.types int:string:string:string:string:int:string:double:string
 #### A masked pattern was here ####
- name default.part
+ name default.part_orc
 numFiles 1
 numRows 26
 rawDataSize 16042
- serialization.ddl struct part { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment}
+ serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment}
 serialization.format 1
 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
 totalSize 2597
 #### A masked pattern was here ####
 serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
- name: default.part
- name: default.part
+ name: default.part_orc
+ name: default.part_orc
 Truncated Path -> Alias:
- /part [part]
+ /part_orc [part_orc]
 Needs Tagging: false
 Reduce Operator Tree:
 Extract
@@ -3050,19 +3050,19 @@
 PREHOOK: query: select p_mfgr, p_name, p_size,
 rank() over (partition by p_mfgr order by p_name, p_size desc) as r
-from noopwithmap(on part
+from noopwithmap(on part_orc
 partition by p_mfgr
 order by p_name, p_size desc)
 PREHOOK: type: QUERY
-PREHOOK: Input: default@part
+PREHOOK: Input: default@part_orc
 #### A masked pattern was here ####
 POSTHOOK: query: select p_mfgr, p_name, p_size,
 rank() over (partition by p_mfgr order by p_name, p_size desc) as r
-from noopwithmap(on part
+from noopwithmap(on part_orc
 partition by p_mfgr
 order by p_name, p_size desc)
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@part
+POSTHOOK: Input: default@part_orc
 #### A masked pattern was here ####
 Manufacturer#1 almond antique burnished rose metallic 2 1
 Manufacturer#1 almond antique burnished rose metallic 2 1
@@ -3097,7 +3097,7 @@
 rank() over (partition by p_mfgr order by p_name) as r,
 dense_rank() over (partition by p_mfgr order by p_name) as dr,
 sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row) as s1
-from noopwithmap(on part
+from noopwithmap(on part_orc
 partition by p_mfgr
 order by p_name)
 PREHOOK: type: QUERY
@@ -3108,7 +3108,7 @@
 rank() over (partition by p_mfgr order by p_name) as r,
 dense_rank() over (partition by p_mfgr order by p_name) as dr,
 sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row) as s1
-from noopwithmap(on part
+from noopwithmap(on part_orc
 partition by p_mfgr
 order by p_name)
 POSTHOOK: type: QUERY
@@ -3120,7 +3120,7 @@
 noopwithmap
 TOK_TABREF
 TOK_TABNAME
- part
+ part_orc
 TOK_PARTITIONINGSPEC
 TOK_DISTRIBUTEBY
 TOK_TABLE_OR_COL
@@ -3200,7 +3200,7 @@
 Map Reduce
 Map Operator Tree:
 TableScan
- alias: part
+ alias: part_orc
 Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE
 GatherStats: false
 PTF Operator
@@ -3218,7 +3218,7 @@
 Path -> Partition:
 #### A masked pattern was here ####
 Partition
- base file name: part
+ base file name: part_orc
 input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
 output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
 properties:
@@ -3228,11 +3228,11 @@
 columns.comments
 columns.types int:string:string:string:string:int:string:double:string
 #### A masked pattern was here ####
- name default.part
+ name default.part_orc
 numFiles 1
 numRows 26
 rawDataSize 16042
- serialization.ddl struct part { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment}
+ serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment}
 serialization.format 1
 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
 totalSize 2597
@@ -3248,20 +3248,20 @@
 columns.comments
 columns.types int:string:string:string:string:int:string:double:string
 #### A masked pattern was here ####
- name default.part
+ name default.part_orc
 numFiles 1
 numRows 26
 rawDataSize 16042
- serialization.ddl struct part { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment}
+ serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment}
 serialization.format 1
 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
 totalSize 2597
 #### A masked pattern was here ####
 serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
- name: default.part
- name: default.part
+ name: default.part_orc
+ name: default.part_orc
 Truncated Path -> Alias:
- /part [part]
+ /part_orc [part_orc]
 Needs Tagging: false
 Reduce Operator Tree:
 Extract
@@ -3370,21 +3370,21 @@
 rank() over (partition by p_mfgr order by p_name) as r,
 dense_rank() over (partition by p_mfgr order by p_name) as dr,
 sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row) as s1
-from noopwithmap(on part
+from noopwithmap(on part_orc
 partition by p_mfgr
 order by p_name)
 PREHOOK: type: QUERY
-PREHOOK: Input: default@part
+PREHOOK: Input: default@part_orc
 #### A masked pattern was here ####
 POSTHOOK: query: select p_mfgr, p_name, p_size,
 rank() over (partition by p_mfgr order by p_name) as r,
 dense_rank() over (partition by p_mfgr order by p_name) as dr,
 sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row) as s1
-from noopwithmap(on part
+from noopwithmap(on part_orc
 partition by p_mfgr
 order by p_name)
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@part
+POSTHOOK: Input: default@part_orc
 #### A masked pattern was here ####
 Manufacturer#1 almond antique burnished rose metallic 2 1 1 1173.15
 Manufacturer#1 almond antique burnished rose metallic 2 1 1 2346.3
@@ -3419,7 +3419,7 @@
 rank() over (partition by p_mfgr order by p_name) as r,
 dense_rank() over (partition by p_mfgr order by p_name) as dr,
 sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row) as s1
-from noop(on part
+from noop(on part_orc
 partition by p_mfgr
 order by p_name)
 PREHOOK: type: QUERY
@@ -3430,7 +3430,7 @@
 rank() over (partition by p_mfgr order by p_name) as r,
 dense_rank() over (partition by p_mfgr order by p_name) as dr,
 sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row) as s1
-from noop(on part
+from noop(on part_orc
 partition by p_mfgr
 order by p_name)
 POSTHOOK: type: QUERY
@@ -3442,7 +3442,7 @@
 noop
 TOK_TABREF
 TOK_TABNAME
- part
+ part_orc
 TOK_PARTITIONINGSPEC
 TOK_DISTRIBUTEBY
 TOK_TABLE_OR_COL
@@ -3522,7 +3522,7 @@
 Map Reduce
 Map Operator Tree:
 TableScan
- alias: part
+ alias: part_orc
 Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE
 GatherStats: false
 Reduce Output Operator
@@ -3538,7 +3538,7 @@
 Path -> Partition:
 #### A masked pattern was here ####
 Partition
- base file name: part
+ base file name: part_orc
 input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
 output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
 properties:
@@ -3548,11 +3548,11 @@
 columns.comments
 columns.types int:string:string:string:string:int:string:double:string
 #### A masked pattern was here ####
- name default.part
+ name default.part_orc
 numFiles 1
 numRows 26
 rawDataSize 16042
- serialization.ddl struct part { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment}
+ serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment}
 serialization.format 1
 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
 totalSize 2597
@@ -3568,20 +3568,20 @@
 columns.comments
 columns.types int:string:string:string:string:int:string:double:string
 #### A masked pattern was here ####
- name default.part
+ name default.part_orc
 numFiles 1
 numRows 26
 rawDataSize 16042
- serialization.ddl struct part { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment}
+ serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment}
 serialization.format 1
 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
 totalSize 2597
 #### A masked pattern was here ####
 serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
- name: default.part
- name: default.part
+ name: default.part_orc
+ name: default.part_orc
 Truncated Path -> Alias:
- /part [part]
+ /part_orc [part_orc]
 Needs Tagging: false
 Reduce Operator Tree:
 Extract
@@ -3690,21 +3690,21 @@
 rank() over (partition by p_mfgr order by p_name) as r,
 dense_rank() over (partition by p_mfgr order by p_name) as dr,
 sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row) as s1
-from noop(on part
+from noop(on part_orc
 partition by p_mfgr
 order by p_name)
 PREHOOK: type: QUERY
-PREHOOK: Input: default@part
+PREHOOK: Input: default@part_orc
 #### A masked pattern was here ####
 POSTHOOK: query: select p_mfgr, p_name, p_size,
 rank() over (partition by p_mfgr order by p_name) as r,
 dense_rank() over (partition by p_mfgr order by p_name) as dr,
 sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row) as s1
-from noop(on part
+from noop(on part_orc
 partition by p_mfgr
 order by p_name)
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@part
+POSTHOOK: Input: default@part_orc
 #### A masked pattern was here ####
 Manufacturer#1 almond antique burnished rose metallic 2 1 1 1173.15
 Manufacturer#1 almond antique burnished rose metallic 2 1 1 2346.3
@@ -3739,7 +3739,7 @@
 rank() over (partition by p_mfgr order by p_name) as r,
 dense_rank() over (partition by p_mfgr order by p_name) as dr,
 sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row) as s1
-from noop(on noopwithmap(on noop(on part
+from noop(on noopwithmap(on noop(on part_orc
 partition by p_mfgr
 order by p_mfgr, p_name
 )))
@@ -3751,7 +3751,7 @@
 rank() over (partition by p_mfgr order by p_name) as r,
 dense_rank() over (partition by p_mfgr order by p_name) as dr,
 sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row) as s1
-from noop(on noopwithmap(on noop(on part
+from noop(on noopwithmap(on noop(on part_orc
 partition by p_mfgr
 order by p_mfgr, p_name
 )))
@@ -3768,7 +3768,7 @@
 noop
 TOK_TABREF
 TOK_TABNAME
- part
+ part_orc
 TOK_PARTITIONINGSPEC
 TOK_DISTRIBUTEBY
 TOK_TABLE_OR_COL
@@ -3852,7 +3852,7 @@
 Map Reduce
 Map Operator Tree:
 TableScan
- alias: part
+ alias: part_orc
 Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE
 GatherStats: false
 Reduce Output Operator
@@ -3868,7 +3868,7 @@
 Path -> Partition:
 #### A masked pattern was here ####
 Partition
- base file name: part
+ base file name: part_orc
 input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
 output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
 properties:
@@ -3878,11 +3878,11 @@
 columns.comments
 columns.types int:string:string:string:string:int:string:double:string
 #### A masked pattern was here ####
- name default.part
+ name default.part_orc
 numFiles 1
 numRows 26
 rawDataSize 16042
- serialization.ddl struct part { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment}
+ serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment}
 serialization.format 1
 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
 totalSize 2597
@@ -3898,20 +3898,20 @@
 columns.comments
 columns.types int:string:string:string:string:int:string:double:string
 #### A masked pattern was here ####
- name default.part
+ name default.part_orc
 numFiles 1
 numRows 26
 rawDataSize 16042
- serialization.ddl struct part { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment}
+ serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment}
 serialization.format 1
 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
 totalSize 2597
 #### A masked pattern was here ####
 serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
- name: default.part
- name: default.part
+ name: default.part_orc
+ name: default.part_orc
 Truncated Path -> Alias:
- /part [part]
+ /part_orc [part_orc]
 Needs Tagging: false
 Reduce Operator Tree:
 Extract
@@ -4084,23 +4084,23 @@
 rank() over (partition by p_mfgr order by p_name) as r,
 dense_rank() over (partition by p_mfgr order by p_name) as dr,
 sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row) as s1
-from noop(on noopwithmap(on noop(on part
+from noop(on noopwithmap(on noop(on part_orc
 partition by p_mfgr
 order by p_mfgr, p_name
 )))
 PREHOOK: type: QUERY
-PREHOOK: Input: default@part
+PREHOOK: Input: default@part_orc
 #### A masked pattern was here ####
 POSTHOOK: query: select p_mfgr, p_name, p_size,
 rank() over (partition by p_mfgr order by p_name) as r,
 dense_rank() over (partition by p_mfgr order by p_name) as dr,
 sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row) as s1
-from noop(on noopwithmap(on noop(on part
+from noop(on noopwithmap(on noop(on part_orc
 partition by p_mfgr
 order by p_mfgr, p_name
 )))
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@part
+POSTHOOK: Input: default@part_orc
 #### A masked pattern was here ####
 Manufacturer#1 almond antique burnished rose metallic 2 1 1 1173.15
 Manufacturer#1 almond antique burnished rose metallic 2 1 1 2346.3
@@ -4137,7 +4137,7 @@
 count(p_size) over (partition by p_mfgr order by p_name) as cd,
 p_retailprice,
 sum(p_retailprice) over w1 as s1
-from noop(on part
+from noop(on part_orc
 partition by p_mfgr
 order by p_name)
 window w1 as (partition by p_mfgr order by p_name rows between 2 preceding and 2 following)
@@ -4152,7 +4152,7 @@
 count(p_size) over (partition by p_mfgr order by p_name) as cd,
 p_retailprice,
 sum(p_retailprice) over w1 as s1
-from noop(on part
+from noop(on part_orc
 partition by p_mfgr
 order by p_name)
 window w1 as (partition by p_mfgr order by p_name rows between 2 preceding and 2 following)
@@ -4169,7 +4169,7 @@
 noop
 TOK_TABREF
 TOK_TABNAME
- part
+ part_orc
 TOK_PARTITIONINGSPEC
 TOK_DISTRIBUTEBY
 TOK_TABLE_OR_COL
@@ -4266,7 +4266,7 @@
 Map Reduce
 Map Operator Tree:
 TableScan
- alias: part
+ alias: part_orc
 Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE
 GatherStats: false
 Reduce Output Operator
@@ -4282,7 +4282,7 @@
 Path -> Partition:
 #### A masked pattern was here ####
 Partition
- base file name: part
+ base file name: part_orc
 input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
 output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
 properties:
@@ -4292,11 +4292,11 @@
 columns.comments
 columns.types int:string:string:string:string:int:string:double:string
 #### A masked pattern was here ####
- name default.part
+ name default.part_orc
 numFiles 1
 numRows 26
 rawDataSize 16042
- serialization.ddl struct part { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment}
+ serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment}
 serialization.format 1
 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
 totalSize 2597
@@ -4312,20 +4312,20 @@
 columns.comments
 columns.types int:string:string:string:string:int:string:double:string
 #### A masked pattern was here ####
- name default.part
+ name default.part_orc
 numFiles 1
 numRows 26
 rawDataSize 16042
- serialization.ddl struct part { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment}
+ serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment}
 serialization.format 1
 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
 totalSize 2597
 #### A masked pattern was here ####
 serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
- name: default.part
- name: default.part
+ name: default.part_orc
+ name: default.part_orc
 Truncated Path -> Alias:
- /part [sub1:part]
+ /part_orc [sub1:part_orc]
 Needs Tagging: false
 Reduce Operator Tree:
 Extract
@@ -4436,13 +4436,13 @@
 count(p_size) over (partition by p_mfgr order by p_name) as cd,
 p_retailprice,
 sum(p_retailprice) over w1 as s1
-from noop(on part
+from noop(on part_orc
 partition by p_mfgr
 order by p_name)
 window w1 as (partition by p_mfgr order by p_name rows between 2 preceding and 2 following)
 ) sub1
 PREHOOK: type: QUERY
-PREHOOK: Input: default@part
+PREHOOK: Input: default@part_orc
 #### A masked pattern was here ####
 POSTHOOK: query: select p_mfgr, p_name, sub1.cd, sub1.s1
@@ -4450,13 +4450,13 @@
 count(p_size) over (partition by p_mfgr order by p_name) as cd,
 p_retailprice,
 sum(p_retailprice) over w1 as s1
-from noop(on part
+from noop(on part_orc
 partition by p_mfgr
 order by p_name)
 window w1 as (partition by p_mfgr order by p_name rows between 2 preceding and 2 following)
 ) sub1
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@part
+POSTHOOK: Input: default@part_orc
 #### A masked pattern was here ####
 Manufacturer#1 almond antique burnished rose metallic 2 4100.06
 Manufacturer#1 almond antique burnished rose metallic 2 5702.650000000001
@@ -4493,10 +4493,10 @@
 count(abc.p_name) over (distribute by abc.p_mfgr sort by abc.p_name) as cd,
 abc.p_retailprice, sum(abc.p_retailprice) over (distribute by abc.p_mfgr sort by abc.p_name rows between unbounded preceding and current row) as s1,
 abc.p_size, abc.p_size - lag(abc.p_size,1,abc.p_size) over (distribute by abc.p_mfgr sort by abc.p_name) as deltaSz
-from noop(on part
+from noop(on part_orc
 partition by p_mfgr
 order by p_name
-) abc join part p1 on abc.p_partkey = p1.p_partkey
+) abc join part_orc p1 on abc.p_partkey = p1.p_partkey
 PREHOOK: type: QUERY
 POSTHOOK: query: -- 14. testPTFJoinWithWindowingWithCount
@@ -4507,10 +4507,10 @@
 count(abc.p_name) over (distribute by abc.p_mfgr sort by abc.p_name) as cd,
 abc.p_retailprice, sum(abc.p_retailprice) over (distribute by abc.p_mfgr sort by abc.p_name rows between unbounded preceding and current row) as s1,
 abc.p_size, abc.p_size - lag(abc.p_size,1,abc.p_size) over (distribute by abc.p_mfgr sort by abc.p_name) as deltaSz
-from noop(on part
+from noop(on part_orc
 partition by p_mfgr
 order by p_name
-) abc join part p1 on abc.p_partkey = p1.p_partkey
+) abc join part_orc p1 on abc.p_partkey = p1.p_partkey
 POSTHOOK: type: QUERY
 ABSTRACT SYNTAX TREE:
@@ -4522,7 +4522,7 @@
 abc
 TOK_TABREF
 TOK_TABNAME
- part
+ part_orc
 TOK_PARTITIONINGSPEC
 TOK_DISTRIBUTEBY
 TOK_TABLE_OR_COL
@@ -4533,7 +4533,7 @@
 p_name
 TOK_TABREF
 TOK_TABNAME
- part
+ part_orc
 p1
 =
 .
@@ -4693,7 +4693,7 @@
 Map Reduce
 Map Operator Tree:
 TableScan
- alias: part
+ alias: part_orc
 Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE
 GatherStats: false
 Reduce Output Operator
@@ -4709,7 +4709,7 @@
 Path -> Partition:
 #### A masked pattern was here ####
 Partition
- base file name: part
+ base file name: part_orc
 input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
 output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
 properties:
@@ -4719,11 +4719,11 @@
 columns.comments
 columns.types int:string:string:string:string:int:string:double:string
 #### A masked pattern was here ####
- name default.part
+ name default.part_orc
 numFiles 1
 numRows 26
 rawDataSize 16042
- serialization.ddl struct part { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment}
+ serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment}
 serialization.format 1
 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
 totalSize 2597
@@ -4739,20 +4739,20 @@
 columns.comments
 columns.types int:string:string:string:string:int:string:double:string
 #### A masked pattern was here ####
- name default.part
+ name default.part_orc
 numFiles 1
 numRows 26
 rawDataSize 16042
- serialization.ddl struct part { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment}
+ serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment}
 serialization.format 1
 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
 totalSize 2597
 #### A masked pattern was here ####
 serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
- name: default.part
- name: default.part
+ name: default.part_orc
+ name: default.part_orc
 Truncated Path -> Alias:
- /part [part]
+ /part_orc [part_orc]
 Needs Tagging: false
 Reduce Operator Tree:
 Extract
@@ -4838,7 +4838,7 @@
 serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
 #### A masked pattern was here ####
 Partition
- base file name: part
+ base file name: part_orc
 input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
 output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
 properties:
@@ -4848,11 +4848,11 @@
 columns.comments
 columns.types int:string:string:string:string:int:string:double:string
 #### A masked pattern was here ####
- name default.part
+ name default.part_orc
 numFiles 1
 numRows 26
 rawDataSize 16042
- serialization.ddl struct part { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment}
+ serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment}
 serialization.format 1
 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
 totalSize 2597
@@ -4868,20 +4868,20 @@
 columns.comments
 columns.types int:string:string:string:string:int:string:double:string
 #### A masked pattern was here ####
- name default.part
+ name default.part_orc
 numFiles 1
 numRows 26
 rawDataSize 16042
- serialization.ddl struct part { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment}
+ serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment}
 serialization.format 1
 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
 totalSize 2597
 #### A masked pattern was here ####
 serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
- name: default.part
- name: default.part
+ name: default.part_orc
+ name: default.part_orc
 Truncated Path -> Alias:
- /part [p1]
+ /part_orc [p1]
 #### A masked pattern was here ####
 Needs Tagging: true
 Reduce Operator Tree:
@@ -4993,12 +4993,12 @@
 count(abc.p_name) over (distribute by abc.p_mfgr sort by abc.p_name) as cd,
 abc.p_retailprice, sum(abc.p_retailprice) over (distribute by abc.p_mfgr sort by abc.p_name rows between unbounded preceding and current row) as s1,
 abc.p_size, abc.p_size - lag(abc.p_size,1,abc.p_size) over (distribute by abc.p_mfgr sort by abc.p_name) as deltaSz
-from noop(on part
+from noop(on part_orc
 partition by p_mfgr
 order by p_name
-) abc join part p1 on abc.p_partkey = p1.p_partkey
+) abc join part_orc p1 on abc.p_partkey = p1.p_partkey
 PREHOOK: type: QUERY
-PREHOOK: Input: default@part
+PREHOOK: Input: default@part_orc
 #### A masked pattern was here ####
 POSTHOOK: query: select abc.p_mfgr, abc.p_name,
 rank() over (distribute by abc.p_mfgr sort by abc.p_name) as r,
@@ -5006,12 +5006,12 @@
 count(abc.p_name) over (distribute by abc.p_mfgr sort by abc.p_name) as cd,
 abc.p_retailprice, sum(abc.p_retailprice) over (distribute by abc.p_mfgr sort by abc.p_name rows between unbounded preceding and current row) as s1,
 abc.p_size, abc.p_size - lag(abc.p_size,1,abc.p_size) over (distribute by abc.p_mfgr sort by abc.p_name) as deltaSz
-from noop(on part
+from noop(on part_orc
 partition by p_mfgr
 order by p_name
-) abc join part p1 on abc.p_partkey = p1.p_partkey
+) abc join part_orc p1 on abc.p_partkey = p1.p_partkey
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@part
+POSTHOOK: Input: default@part_orc
 #### A masked pattern was here ####
 Manufacturer#1 almond antique burnished rose metallic 1 1 4 1173.15 1173.15 2 0
 Manufacturer#1 almond antique burnished rose metallic 1 1 4 1173.15 2346.3 2 0
@@ -5045,7 +5045,7 @@
 explain extended
 select DISTINCT p_mfgr, p_name, p_size
-from noop(on part
+from noop(on part_orc
 partition by p_mfgr
 order by p_name)
 PREHOOK: type: QUERY
@@ -5053,7 +5053,7 @@
 explain extended
 select DISTINCT p_mfgr, p_name, p_size
-from noop(on part
+from noop(on part_orc
 partition by p_mfgr
 order by p_name)
 POSTHOOK: type: QUERY
@@ -5065,7 +5065,7 @@
 noop
 TOK_TABREF
 TOK_TABNAME
- part
+ part_orc
 TOK_PARTITIONINGSPEC
 TOK_DISTRIBUTEBY
 TOK_TABLE_OR_COL
@@ -5100,7 +5100,7 @@
 Map Reduce
 Map Operator Tree:
 TableScan
- alias: part
+ alias: part_orc
 Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE
 GatherStats: false
 Reduce Output Operator
@@ -5116,7 +5116,7 @@
 Path -> Partition:
 #### A masked pattern was here ####
 Partition
- base file name: part
+ base file name: part_orc
 input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
 output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
 properties:
@@ -5126,11 +5126,11 @@
 columns.comments
 columns.types int:string:string:string:string:int:string:double:string
 #### A masked pattern was here ####
- name default.part
+ name default.part_orc
 numFiles 1
 numRows 26
 rawDataSize 16042
- serialization.ddl struct part { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment}
+ serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment}
 serialization.format 1
 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
 totalSize 2597
@@ -5146,20 +5146,20 @@
 columns.comments
 columns.types int:string:string:string:string:int:string:double:string
 #### A masked pattern was here ####
- name default.part
+ name default.part_orc
 numFiles 1
 numRows 26
 rawDataSize 16042
- serialization.ddl struct part { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment}
+ serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment}
 serialization.format 1
 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
 totalSize 2597
 #### A masked pattern was here ####
 serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
- name: default.part
- name: default.part
+ name: default.part_orc
+ name: default.part_orc
 Truncated Path -> Alias:
- /part [part]
+ /part_orc [part_orc]
 Needs Tagging: false
 Reduce Operator Tree:
 Extract
@@ -5270,18 +5270,18 @@
 ListSink
 PREHOOK: query: select DISTINCT p_mfgr, p_name, p_size
-from noop(on part
+from noop(on part_orc
 partition by p_mfgr
 order by p_name)
 PREHOOK: type: QUERY
-PREHOOK: Input: default@part
+PREHOOK: Input: default@part_orc
 #### A masked pattern was here ####
 POSTHOOK: query: select DISTINCT p_mfgr, p_name, p_size
-from noop(on part
+from noop(on part_orc
 partition by p_mfgr
 order by p_name)
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@part
+POSTHOOK: Input: default@part_orc
 #### A masked pattern was here ####
 Manufacturer#1 almond antique burnished rose metallic 2
 Manufacturer#1 almond antique chartreuse lavender yellow 34
@@ -5312,20 +5312,20 @@
 create view IF NOT EXISTS mfgr_price_view as
 select p_mfgr, p_brand,
 sum(p_retailprice) as s
-from part
+from part_orc
 group by p_mfgr, p_brand
 PREHOOK: type: CREATEVIEW
-PREHOOK: Input: default@part
+PREHOOK: Input: default@part_orc
 PREHOOK: Output: database:default
 PREHOOK: Output: default@mfgr_price_view
 POSTHOOK: query: -- 16.
testViewAsTableInputToPTF create view IF NOT EXISTS mfgr_price_view as select p_mfgr, p_brand, sum(p_retailprice) as s -from part +from part_orc group by p_mfgr, p_brand POSTHOOK: type: CREATEVIEW -POSTHOOK: Input: default@part +POSTHOOK: Input: default@part_orc POSTHOOK: Output: database:default POSTHOOK: Output: default@mfgr_price_view PREHOOK: query: explain extended @@ -5412,7 +5412,7 @@ Map Reduce Map Operator Tree: TableScan - alias: part + alias: part_orc Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE GatherStats: false Select Operator @@ -5438,7 +5438,7 @@ Path -> Partition: #### A masked pattern was here #### Partition - base file name: part + base file name: part_orc input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat properties: @@ -5448,11 +5448,11 @@ columns.comments columns.types int:string:string:string:string:int:string:double:string #### A masked pattern was here #### - name default.part + name default.part_orc numFiles 1 numRows 26 rawDataSize 16042 - serialization.ddl struct part { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} + serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde totalSize 2597 @@ -5468,20 +5468,20 @@ columns.comments columns.types int:string:string:string:string:int:string:double:string #### A masked pattern was here #### - name default.part + name default.part_orc numFiles 1 numRows 26 rawDataSize 16042 - serialization.ddl struct part { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} + serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde totalSize 2597 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.part - name: default.part + name: default.part_orc + name: default.part_orc Truncated Path -> Alias: - /part [mfgr_price_view:part] + /part_orc [mfgr_price_view:part_orc] Execution mode: vectorized Needs Tagging: false Reduce Operator Tree: @@ -5659,7 +5659,7 @@ window w1 as ( partition by p_mfgr order by p_brand rows between 2 preceding and current row) PREHOOK: type: QUERY PREHOOK: Input: default@mfgr_price_view -PREHOOK: Input: default@part +PREHOOK: Input: default@part_orc #### A masked pattern was here #### POSTHOOK: query: select p_mfgr, p_brand, s, sum(s) over w1 as s1 @@ -5669,7 +5669,7 @@ window w1 as ( partition by p_mfgr order by p_brand rows between 2 preceding and current row) POSTHOOK: type: QUERY POSTHOOK: Input: default@mfgr_price_view -POSTHOOK: Input: default@part +POSTHOOK: Input: default@part_orc #### A masked pattern was here #### Manufacturer#1 Brand#12 4800.84 4800.84 Manufacturer#1 Brand#14 2346.3 7147.14 @@ -5734,7 +5734,7 @@ POSTHOOK: Output: database:default POSTHOOK: Output: default@part_5 PREHOOK: query: explain extended -from noop(on part +from noop(on part_orc partition by p_mfgr order by p_name) INSERT OVERWRITE TABLE part_4 select p_mfgr, p_name, 
p_size, @@ -5750,7 +5750,7 @@ window w1 as (distribute by p_mfgr sort by p_mfgr, p_name rows between 2 preceding and 2 following) PREHOOK: type: QUERY POSTHOOK: query: explain extended -from noop(on part +from noop(on part_orc partition by p_mfgr order by p_name) INSERT OVERWRITE TABLE part_4 select p_mfgr, p_name, p_size, @@ -5773,7 +5773,7 @@ noop TOK_TABREF TOK_TABNAME - part + part_orc TOK_PARTITIONINGSPEC TOK_DISTRIBUTEBY TOK_TABLE_OR_COL @@ -5973,7 +5973,7 @@ Map Reduce Map Operator Tree: TableScan - alias: part + alias: part_orc Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE GatherStats: false Reduce Output Operator @@ -5989,7 +5989,7 @@ Path -> Partition: #### A masked pattern was here #### Partition - base file name: part + base file name: part_orc input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat properties: @@ -5999,11 +5999,11 @@ columns.comments columns.types int:string:string:string:string:int:string:double:string #### A masked pattern was here #### - name default.part + name default.part_orc numFiles 1 numRows 26 rawDataSize 16042 - serialization.ddl struct part { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} + serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde totalSize 2597 @@ -6019,20 +6019,20 @@ columns.comments columns.types int:string:string:string:string:int:string:double:string #### A masked pattern was here #### - name default.part + name default.part_orc numFiles 1 numRows 26 rawDataSize 16042 - serialization.ddl struct part { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} + serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde totalSize 2597 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.part - name: default.part + name: default.part_orc + name: default.part_orc Truncated Path -> Alias: - /part [part] + /part_orc [part_orc] Needs Tagging: false Reduce Operator Tree: Extract @@ -6342,7 +6342,7 @@ Stats-Aggr Operator #### A masked pattern was here #### -PREHOOK: query: from noop(on part +PREHOOK: query: from noop(on part_orc partition by p_mfgr order by p_name) INSERT OVERWRITE TABLE part_4 select p_mfgr, p_name, p_size, @@ -6357,10 +6357,10 @@ first_value(p_size, true) over w1 as fv1 window w1 as (distribute by p_mfgr sort by p_mfgr, p_name rows between 2 preceding and 2 following) PREHOOK: type: QUERY -PREHOOK: Input: default@part +PREHOOK: Input: default@part_orc PREHOOK: Output: default@part_4 PREHOOK: Output: default@part_5 -POSTHOOK: query: from noop(on part +POSTHOOK: query: from noop(on part_orc partition by p_mfgr order by p_name) INSERT OVERWRITE TABLE part_4 select p_mfgr, p_name, p_size, @@ -6375,23 +6375,23 @@ first_value(p_size, true) over w1 as fv1 window w1 as (distribute by p_mfgr sort by p_mfgr, p_name rows between 2 preceding and 2 following) POSTHOOK: type: QUERY 
-POSTHOOK: Input: default@part +POSTHOOK: Input: default@part_orc POSTHOOK: Output: default@part_4 POSTHOOK: Output: default@part_5 -POSTHOOK: Lineage: part_4.dr SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part)part.FieldSchema(name:ROW__ID, type:struct, comment:), ] -POSTHOOK: Lineage: part_4.p_mfgr SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part)part.FieldSchema(name:ROW__ID, type:struct, comment:), ] -POSTHOOK: Lineage: part_4.p_name SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part)part.FieldSchema(name:ROW__ID, type:struct, comment:), ] -POSTHOOK: Lineage: part_4.p_size SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part)part.FieldSchema(name:ROW__ID, type:struct, comment:), ] -POSTHOOK: Lineage: part_4.r SCRIPT 
[(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part)part.FieldSchema(name:ROW__ID, type:struct, comment:), ] -POSTHOOK: Lineage: part_4.s SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part)part.FieldSchema(name:ROW__ID, type:struct, comment:), ] -POSTHOOK: Lineage: part_5.cud SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part)part.FieldSchema(name:ROW__ID, type:struct, comment:), ] -POSTHOOK: Lineage: part_5.dr SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part)part.FieldSchema(name:ROW__ID, type:struct, comment:), ] -POSTHOOK: Lineage: part_5.fv1 SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, 
comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part)part.FieldSchema(name:ROW__ID, type:struct, comment:), ] -POSTHOOK: Lineage: part_5.p_mfgr SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part)part.FieldSchema(name:ROW__ID, type:struct, comment:), ] -POSTHOOK: Lineage: part_5.p_name SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part)part.FieldSchema(name:ROW__ID, type:struct, comment:), ] -POSTHOOK: Lineage: part_5.p_size SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part)part.FieldSchema(name:ROW__ID, type:struct, comment:), ] -POSTHOOK: Lineage: part_5.r SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), 
(part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part)part.FieldSchema(name:ROW__ID, type:struct, comment:), ] -POSTHOOK: Lineage: part_5.s2 SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part)part.FieldSchema(name:ROW__ID, type:struct, comment:), ] +POSTHOOK: Lineage: part_4.dr SCRIPT [(part_orc)part_orc.FieldSchema(name:p_partkey, type:int, comment:null), (part_orc)part_orc.FieldSchema(name:p_name, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_mfgr, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_brand, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_type, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_size, type:int, comment:null), (part_orc)part_orc.FieldSchema(name:p_container, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_retailprice, type:double, comment:null), (part_orc)part_orc.FieldSchema(name:p_comment, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part_orc)part_orc.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part_orc)part_orc.FieldSchema(name:ROW__ID, type:struct, comment:), ] +POSTHOOK: Lineage: part_4.p_mfgr SCRIPT [(part_orc)part_orc.FieldSchema(name:p_partkey, type:int, comment:null), (part_orc)part_orc.FieldSchema(name:p_name, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_mfgr, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_brand, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_type, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_size, type:int, comment:null), (part_orc)part_orc.FieldSchema(name:p_container, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_retailprice, type:double, comment:null), (part_orc)part_orc.FieldSchema(name:p_comment, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part_orc)part_orc.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part_orc)part_orc.FieldSchema(name:ROW__ID, type:struct, comment:), ] +POSTHOOK: Lineage: part_4.p_name SCRIPT [(part_orc)part_orc.FieldSchema(name:p_partkey, type:int, comment:null), (part_orc)part_orc.FieldSchema(name:p_name, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_mfgr, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_brand, type:string, comment:null), 
(part_orc)part_orc.FieldSchema(name:p_type, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_size, type:int, comment:null), (part_orc)part_orc.FieldSchema(name:p_container, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_retailprice, type:double, comment:null), (part_orc)part_orc.FieldSchema(name:p_comment, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part_orc)part_orc.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part_orc)part_orc.FieldSchema(name:ROW__ID, type:struct, comment:), ] +POSTHOOK: Lineage: part_4.p_size SCRIPT [(part_orc)part_orc.FieldSchema(name:p_partkey, type:int, comment:null), (part_orc)part_orc.FieldSchema(name:p_name, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_mfgr, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_brand, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_type, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_size, type:int, comment:null), (part_orc)part_orc.FieldSchema(name:p_container, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_retailprice, type:double, comment:null), (part_orc)part_orc.FieldSchema(name:p_comment, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part_orc)part_orc.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part_orc)part_orc.FieldSchema(name:ROW__ID, type:struct, comment:), ] +POSTHOOK: Lineage: part_4.r SCRIPT [(part_orc)part_orc.FieldSchema(name:p_partkey, type:int, comment:null), (part_orc)part_orc.FieldSchema(name:p_name, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_mfgr, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_brand, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_type, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_size, type:int, comment:null), (part_orc)part_orc.FieldSchema(name:p_container, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_retailprice, type:double, comment:null), (part_orc)part_orc.FieldSchema(name:p_comment, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part_orc)part_orc.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part_orc)part_orc.FieldSchema(name:ROW__ID, type:struct, comment:), ] +POSTHOOK: Lineage: part_4.s SCRIPT [(part_orc)part_orc.FieldSchema(name:p_partkey, type:int, comment:null), (part_orc)part_orc.FieldSchema(name:p_name, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_mfgr, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_brand, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_type, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_size, type:int, comment:null), (part_orc)part_orc.FieldSchema(name:p_container, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_retailprice, type:double, comment:null), (part_orc)part_orc.FieldSchema(name:p_comment, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part_orc)part_orc.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part_orc)part_orc.FieldSchema(name:ROW__ID, type:struct, comment:), ] +POSTHOOK: Lineage: part_5.cud SCRIPT [(part_orc)part_orc.FieldSchema(name:p_partkey, type:int, comment:null), 
(part_orc)part_orc.FieldSchema(name:p_name, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_mfgr, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_brand, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_type, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_size, type:int, comment:null), (part_orc)part_orc.FieldSchema(name:p_container, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_retailprice, type:double, comment:null), (part_orc)part_orc.FieldSchema(name:p_comment, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part_orc)part_orc.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part_orc)part_orc.FieldSchema(name:ROW__ID, type:struct, comment:), ] +POSTHOOK: Lineage: part_5.dr SCRIPT [(part_orc)part_orc.FieldSchema(name:p_partkey, type:int, comment:null), (part_orc)part_orc.FieldSchema(name:p_name, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_mfgr, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_brand, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_type, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_size, type:int, comment:null), (part_orc)part_orc.FieldSchema(name:p_container, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_retailprice, type:double, comment:null), (part_orc)part_orc.FieldSchema(name:p_comment, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part_orc)part_orc.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part_orc)part_orc.FieldSchema(name:ROW__ID, type:struct, comment:), ] +POSTHOOK: Lineage: part_5.fv1 SCRIPT [(part_orc)part_orc.FieldSchema(name:p_partkey, type:int, comment:null), (part_orc)part_orc.FieldSchema(name:p_name, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_mfgr, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_brand, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_type, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_size, type:int, comment:null), (part_orc)part_orc.FieldSchema(name:p_container, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_retailprice, type:double, comment:null), (part_orc)part_orc.FieldSchema(name:p_comment, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part_orc)part_orc.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part_orc)part_orc.FieldSchema(name:ROW__ID, type:struct, comment:), ] +POSTHOOK: Lineage: part_5.p_mfgr SCRIPT [(part_orc)part_orc.FieldSchema(name:p_partkey, type:int, comment:null), (part_orc)part_orc.FieldSchema(name:p_name, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_mfgr, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_brand, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_type, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_size, type:int, comment:null), (part_orc)part_orc.FieldSchema(name:p_container, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_retailprice, type:double, comment:null), (part_orc)part_orc.FieldSchema(name:p_comment, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part_orc)part_orc.FieldSchema(name:INPUT__FILE__NAME, 
type:string, comment:), (part_orc)part_orc.FieldSchema(name:ROW__ID, type:struct, comment:), ] +POSTHOOK: Lineage: part_5.p_name SCRIPT [(part_orc)part_orc.FieldSchema(name:p_partkey, type:int, comment:null), (part_orc)part_orc.FieldSchema(name:p_name, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_mfgr, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_brand, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_type, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_size, type:int, comment:null), (part_orc)part_orc.FieldSchema(name:p_container, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_retailprice, type:double, comment:null), (part_orc)part_orc.FieldSchema(name:p_comment, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part_orc)part_orc.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part_orc)part_orc.FieldSchema(name:ROW__ID, type:struct, comment:), ] +POSTHOOK: Lineage: part_5.p_size SCRIPT [(part_orc)part_orc.FieldSchema(name:p_partkey, type:int, comment:null), (part_orc)part_orc.FieldSchema(name:p_name, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_mfgr, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_brand, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_type, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_size, type:int, comment:null), (part_orc)part_orc.FieldSchema(name:p_container, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_retailprice, type:double, comment:null), (part_orc)part_orc.FieldSchema(name:p_comment, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part_orc)part_orc.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part_orc)part_orc.FieldSchema(name:ROW__ID, type:struct, comment:), ] +POSTHOOK: Lineage: part_5.r SCRIPT [(part_orc)part_orc.FieldSchema(name:p_partkey, type:int, comment:null), (part_orc)part_orc.FieldSchema(name:p_name, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_mfgr, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_brand, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_type, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_size, type:int, comment:null), (part_orc)part_orc.FieldSchema(name:p_container, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_retailprice, type:double, comment:null), (part_orc)part_orc.FieldSchema(name:p_comment, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part_orc)part_orc.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part_orc)part_orc.FieldSchema(name:ROW__ID, type:struct, comment:), ] +POSTHOOK: Lineage: part_5.s2 SCRIPT [(part_orc)part_orc.FieldSchema(name:p_partkey, type:int, comment:null), (part_orc)part_orc.FieldSchema(name:p_name, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_mfgr, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_brand, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_type, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_size, type:int, comment:null), (part_orc)part_orc.FieldSchema(name:p_container, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:p_retailprice, type:double, comment:null), 
(part_orc)part_orc.FieldSchema(name:p_comment, type:string, comment:null), (part_orc)part_orc.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part_orc)part_orc.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part_orc)part_orc.FieldSchema(name:ROW__ID, type:struct, comment:), ] PREHOOK: query: select * from part_4 PREHOOK: type: QUERY PREHOOK: Input: default@part_4 @@ -6470,7 +6470,7 @@ from noop(on noopwithmap(on noop(on - noop(on part + noop(on part_orc partition by p_mfgr order by p_mfgr) ) @@ -6489,7 +6489,7 @@ from noop(on noopwithmap(on noop(on - noop(on part + noop(on part_orc partition by p_mfgr order by p_mfgr) ) @@ -6512,7 +6512,7 @@ noop TOK_TABREF TOK_TABNAME - part + part_orc TOK_PARTITIONINGSPEC TOK_DISTRIBUTEBY TOK_TABLE_OR_COL @@ -6613,7 +6613,7 @@ Map Reduce Map Operator Tree: TableScan - alias: part + alias: part_orc Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE GatherStats: false Reduce Output Operator @@ -6629,7 +6629,7 @@ Path -> Partition: #### A masked pattern was here #### Partition - base file name: part + base file name: part_orc input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat properties: @@ -6639,11 +6639,11 @@ columns.comments columns.types int:string:string:string:string:int:string:double:string #### A masked pattern was here #### - name default.part + name default.part_orc numFiles 1 numRows 26 rawDataSize 16042 - serialization.ddl struct part { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} + serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde totalSize 2597 @@ -6659,20 +6659,20 @@ columns.comments columns.types int:string:string:string:string:int:string:double:string #### A masked pattern was here #### - name default.part + name default.part_orc numFiles 1 numRows 26 rawDataSize 16042 - serialization.ddl struct part { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} + serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde totalSize 2597 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.part - name: default.part + name: default.part_orc + name: default.part_orc Truncated Path -> Alias: - /part [part] + /part_orc [part_orc] Needs Tagging: false Reduce Operator Tree: Extract @@ -6848,7 +6848,7 @@ from noop(on noopwithmap(on noop(on - noop(on part + noop(on part_orc partition by p_mfgr order by p_mfgr) ) @@ -6857,7 +6857,7 @@ partition by p_mfgr,p_name order by p_mfgr,p_name) PREHOOK: type: QUERY -PREHOOK: Input: default@part +PREHOOK: Input: default@part_orc #### A masked pattern was here #### POSTHOOK: query: select p_mfgr, p_name, rank() over (partition by p_mfgr,p_name) as r, @@ -6866,7 +6866,7 @@ from noop(on noopwithmap(on noop(on - noop(on part + noop(on part_orc partition by p_mfgr order by p_mfgr) ) @@ -6875,7 +6875,7 @@ partition by 
p_mfgr,p_name order by p_mfgr,p_name) POSTHOOK: type: QUERY -POSTHOOK: Input: default@part +POSTHOOK: Input: default@part_orc #### A masked pattern was here #### Manufacturer#1 almond antique burnished rose metallic 1 1 2 2 Manufacturer#1 almond antique burnished rose metallic 1 1 2 4 @@ -6913,7 +6913,7 @@ from noop(on noop(on noop(on - noop(on part + noop(on part_orc partition by p_mfgr order by p_mfgr) ) @@ -6932,7 +6932,7 @@ from noop(on noop(on noop(on - noop(on part + noop(on part_orc partition by p_mfgr order by p_mfgr) ) @@ -6955,7 +6955,7 @@ noop TOK_TABREF TOK_TABNAME - part + part_orc TOK_PARTITIONINGSPEC TOK_DISTRIBUTEBY TOK_TABLE_OR_COL @@ -7058,7 +7058,7 @@ Map Reduce Map Operator Tree: TableScan - alias: part + alias: part_orc Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE GatherStats: false Reduce Output Operator @@ -7074,7 +7074,7 @@ Path -> Partition: #### A masked pattern was here #### Partition - base file name: part + base file name: part_orc input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat properties: @@ -7084,11 +7084,11 @@ columns.comments columns.types int:string:string:string:string:int:string:double:string #### A masked pattern was here #### - name default.part + name default.part_orc numFiles 1 numRows 26 rawDataSize 16042 - serialization.ddl struct part { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} + serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde totalSize 2597 @@ -7104,20 +7104,20 @@ columns.comments columns.types int:string:string:string:string:int:string:double:string #### A masked pattern was here #### - name default.part + name default.part_orc numFiles 1 numRows 26 rawDataSize 16042 - serialization.ddl struct part { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} + serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde totalSize 2597 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.part - name: default.part + name: default.part_orc + name: default.part_orc Truncated Path -> Alias: - /part [part] + /part_orc [part_orc] Needs Tagging: false Reduce Operator Tree: Extract @@ -7353,7 +7353,7 @@ from noop(on noop(on noop(on - noop(on part + noop(on part_orc partition by p_mfgr order by p_mfgr) ) @@ -7362,7 +7362,7 @@ partition by p_mfgr order by p_mfgr ) PREHOOK: type: QUERY -PREHOOK: Input: default@part +PREHOOK: Input: default@part_orc #### A masked pattern was here #### POSTHOOK: query: select p_mfgr, p_name, rank() over (partition by p_mfgr order by p_name) as r, @@ -7371,7 +7371,7 @@ from noop(on noop(on noop(on - noop(on part + noop(on part_orc partition by p_mfgr order by p_mfgr) ) @@ -7380,7 +7380,7 @@ partition by p_mfgr order by p_mfgr ) POSTHOOK: type: QUERY -POSTHOOK: Input: default@part +POSTHOOK: Input: default@part_orc #### A masked pattern was here #### 
Manufacturer#1 almond antique burnished rose metallic 1 1 2 2 Manufacturer#1 almond antique burnished rose metallic 1 1 2 4 @@ -7418,7 +7418,7 @@ from noop(on noop(on noop(on - noop(on part + noop(on part_orc partition by p_mfgr,p_name order by p_mfgr,p_name) ) @@ -7435,7 +7435,7 @@ from noop(on noop(on noop(on - noop(on part + noop(on part_orc partition by p_mfgr,p_name order by p_mfgr,p_name) ) @@ -7456,7 +7456,7 @@ noop TOK_TABREF TOK_TABNAME - part + part_orc TOK_PARTITIONINGSPEC TOK_DISTRIBUTEBY TOK_TABLE_OR_COL @@ -7546,7 +7546,7 @@ Map Reduce Map Operator Tree: TableScan - alias: part + alias: part_orc Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE GatherStats: false Reduce Output Operator @@ -7562,7 +7562,7 @@ Path -> Partition: #### A masked pattern was here #### Partition - base file name: part + base file name: part_orc input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat properties: @@ -7572,11 +7572,11 @@ columns.comments columns.types int:string:string:string:string:int:string:double:string #### A masked pattern was here #### - name default.part + name default.part_orc numFiles 1 numRows 26 rawDataSize 16042 - serialization.ddl struct part { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} + serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde totalSize 2597 @@ -7592,20 +7592,20 @@ columns.comments columns.types int:string:string:string:string:int:string:double:string #### A masked pattern was here #### - name default.part + name default.part_orc numFiles 1 numRows 26 rawDataSize 16042 - serialization.ddl struct part { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} + serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde totalSize 2597 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.part - name: default.part + name: default.part_orc + name: default.part_orc Truncated Path -> Alias: - /part [part] + /part_orc [part_orc] Needs Tagging: false Reduce Operator Tree: Extract @@ -7779,14 +7779,14 @@ from noop(on noop(on noop(on - noop(on part + noop(on part_orc partition by p_mfgr,p_name order by p_mfgr,p_name) ) partition by p_mfgr order by p_mfgr)) PREHOOK: type: QUERY -PREHOOK: Input: default@part +PREHOOK: Input: default@part_orc #### A masked pattern was here #### POSTHOOK: query: select p_mfgr, p_name, rank() over (partition by p_mfgr order by p_name) as r, @@ -7795,14 +7795,14 @@ from noop(on noop(on noop(on - noop(on part + noop(on part_orc partition by p_mfgr,p_name order by p_mfgr,p_name) ) partition by p_mfgr order by p_mfgr)) POSTHOOK: type: QUERY -POSTHOOK: Input: default@part +POSTHOOK: Input: default@part_orc #### A masked pattern was here #### Manufacturer#1 almond antique burnished rose metallic 1 1 2 4 Manufacturer#1 almond antique burnished rose metallic 1 1 2 4 @@ -7840,7 +7840,7 @@ from 
noopwithmap(on noop(on noop(on - noop(on part + noop(on part_orc partition by p_mfgr,p_name order by p_mfgr,p_name) ) @@ -7859,7 +7859,7 @@ from noopwithmap(on noop(on noop(on - noop(on part + noop(on part_orc partition by p_mfgr,p_name order by p_mfgr,p_name) ) @@ -7882,7 +7882,7 @@ noop TOK_TABREF TOK_TABNAME - part + part_orc TOK_PARTITIONINGSPEC TOK_DISTRIBUTEBY TOK_TABLE_OR_COL @@ -7984,7 +7984,7 @@ Map Reduce Map Operator Tree: TableScan - alias: part + alias: part_orc Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE GatherStats: false Reduce Output Operator @@ -8000,7 +8000,7 @@ Path -> Partition: #### A masked pattern was here #### Partition - base file name: part + base file name: part_orc input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat properties: @@ -8010,11 +8010,11 @@ columns.comments columns.types int:string:string:string:string:int:string:double:string #### A masked pattern was here #### - name default.part + name default.part_orc numFiles 1 numRows 26 rawDataSize 16042 - serialization.ddl struct part { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} + serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde totalSize 2597 @@ -8030,20 +8030,20 @@ columns.comments columns.types int:string:string:string:string:int:string:double:string #### A masked pattern was here #### - name default.part + name default.part_orc numFiles 1 numRows 26 rawDataSize 16042 - serialization.ddl struct part { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} + serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde totalSize 2597 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.part - name: default.part + name: default.part_orc + name: default.part_orc Truncated Path -> Alias: - /part [part] + /part_orc [part_orc] Needs Tagging: false Reduce Operator Tree: Extract @@ -8281,7 +8281,7 @@ from noopwithmap(on noop(on noop(on - noop(on part + noop(on part_orc partition by p_mfgr,p_name order by p_mfgr,p_name) ) @@ -8290,7 +8290,7 @@ partition by p_mfgr,p_name order by p_mfgr,p_name) PREHOOK: type: QUERY -PREHOOK: Input: default@part +PREHOOK: Input: default@part_orc #### A masked pattern was here #### POSTHOOK: query: select p_mfgr, p_name, rank() over (partition by p_mfgr,p_name) as r, @@ -8299,7 +8299,7 @@ from noopwithmap(on noop(on noop(on - noop(on part + noop(on part_orc partition by p_mfgr,p_name order by p_mfgr,p_name) ) @@ -8308,7 +8308,7 @@ partition by p_mfgr,p_name order by p_mfgr,p_name) POSTHOOK: type: QUERY -POSTHOOK: Input: default@part +POSTHOOK: Input: default@part_orc #### A masked pattern was here #### Manufacturer#1 almond antique burnished rose metallic 1 1 2 2 Manufacturer#1 almond antique burnished rose metallic 1 1 2 4 @@ -8347,7 +8347,7 @@ sum(p_size) over (partition by p_mfgr,p_name order by p_mfgr,p_name rows 
between unbounded preceding and current row) as s2 from noop(on noopwithmap(on - noop(on part + noop(on part_orc partition by p_mfgr, p_name order by p_mfgr, p_name) partition by p_mfgr @@ -8365,7 +8365,7 @@ sum(p_size) over (partition by p_mfgr,p_name order by p_mfgr,p_name rows between unbounded preceding and current row) as s2 from noop(on noopwithmap(on - noop(on part + noop(on part_orc partition by p_mfgr, p_name order by p_mfgr, p_name) partition by p_mfgr @@ -8384,7 +8384,7 @@ noop TOK_TABREF TOK_TABNAME - part + part_orc TOK_PARTITIONINGSPEC TOK_DISTRIBUTEBY TOK_TABLE_OR_COL @@ -8517,7 +8517,7 @@ Map Reduce Map Operator Tree: TableScan - alias: part + alias: part_orc Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE GatherStats: false Reduce Output Operator @@ -8533,7 +8533,7 @@ Path -> Partition: #### A masked pattern was here #### Partition - base file name: part + base file name: part_orc input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat properties: @@ -8543,11 +8543,11 @@ columns.comments columns.types int:string:string:string:string:int:string:double:string #### A masked pattern was here #### - name default.part + name default.part_orc numFiles 1 numRows 26 rawDataSize 16042 - serialization.ddl struct part { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} + serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde totalSize 2597 @@ -8563,20 +8563,20 @@ columns.comments columns.types int:string:string:string:string:int:string:double:string #### A masked pattern was here #### - name default.part + name default.part_orc numFiles 1 numRows 26 rawDataSize 16042 - serialization.ddl struct part { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} + serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde totalSize 2597 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.part - name: default.part + name: default.part_orc + name: default.part_orc Truncated Path -> Alias: - /part [part] + /part_orc [part_orc] Needs Tagging: false Reduce Operator Tree: Extract @@ -8753,14 +8753,14 @@ sum(p_size) over (partition by p_mfgr,p_name order by p_mfgr,p_name rows between unbounded preceding and current row) as s2 from noop(on noopwithmap(on - noop(on part + noop(on part_orc partition by p_mfgr, p_name order by p_mfgr, p_name) partition by p_mfgr order by p_mfgr )) PREHOOK: type: QUERY -PREHOOK: Input: default@part +PREHOOK: Input: default@part_orc #### A masked pattern was here #### POSTHOOK: query: select p_mfgr, p_name, rank() over (partition by p_mfgr,p_name order by p_mfgr,p_name) as r, @@ -8770,14 +8770,14 @@ sum(p_size) over (partition by p_mfgr,p_name order by p_mfgr,p_name rows between unbounded preceding and current row) as s2 from noop(on noopwithmap(on - noop(on part + noop(on part_orc partition by p_mfgr, p_name order by 
p_mfgr, p_name) partition by p_mfgr order by p_mfgr )) POSTHOOK: type: QUERY -POSTHOOK: Input: default@part +POSTHOOK: Input: default@part_orc #### A masked pattern was here #### Manufacturer#1 almond antique burnished rose metallic 1 1 2 2 2 Manufacturer#1 almond antique burnished rose metallic 1 1 2 4 4 @@ -8816,7 +8816,7 @@ sum(p_size) over (partition by p_mfgr order by p_name range between unbounded preceding and current row) as s2 from noopwithmap(on noop(on - noop(on part + noop(on part_orc partition by p_mfgr, p_name order by p_mfgr, p_name) )) @@ -8832,7 +8832,7 @@ sum(p_size) over (partition by p_mfgr order by p_name range between unbounded preceding and current row) as s2 from noopwithmap(on noop(on - noop(on part + noop(on part_orc partition by p_mfgr, p_name order by p_mfgr, p_name) )) @@ -8849,7 +8849,7 @@ noop TOK_TABREF TOK_TABNAME - part + part_orc TOK_PARTITIONINGSPEC TOK_DISTRIBUTEBY TOK_TABLE_OR_COL @@ -8954,7 +8954,7 @@ Map Reduce Map Operator Tree: TableScan - alias: part + alias: part_orc Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE GatherStats: false Reduce Output Operator @@ -8970,7 +8970,7 @@ Path -> Partition: #### A masked pattern was here #### Partition - base file name: part + base file name: part_orc input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat properties: @@ -8980,11 +8980,11 @@ columns.comments columns.types int:string:string:string:string:int:string:double:string #### A masked pattern was here #### - name default.part + name default.part_orc numFiles 1 numRows 26 rawDataSize 16042 - serialization.ddl struct part { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} + serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde totalSize 2597 @@ -9000,20 +9000,20 @@ columns.comments columns.types int:string:string:string:string:int:string:double:string #### A masked pattern was here #### - name default.part + name default.part_orc numFiles 1 numRows 26 rawDataSize 16042 - serialization.ddl struct part { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} + serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde totalSize 2597 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.part - name: default.part + name: default.part_orc + name: default.part_orc Truncated Path -> Alias: - /part [part] + /part_orc [part_orc] Needs Tagging: false Reduce Operator Tree: Extract @@ -9190,12 +9190,12 @@ sum(p_size) over (partition by p_mfgr order by p_name range between unbounded preceding and current row) as s2 from noopwithmap(on noop(on - noop(on part + noop(on part_orc partition by p_mfgr, p_name order by p_mfgr, p_name) )) PREHOOK: type: QUERY -PREHOOK: Input: default@part +PREHOOK: Input: default@part_orc #### A masked pattern was here #### POSTHOOK: query: select p_mfgr, p_name, rank() over (partition by 
p_mfgr order by p_name) as r, @@ -9205,12 +9205,12 @@ sum(p_size) over (partition by p_mfgr order by p_name range between unbounded preceding and current row) as s2 from noopwithmap(on noop(on - noop(on part + noop(on part_orc partition by p_mfgr, p_name order by p_mfgr, p_name) )) POSTHOOK: type: QUERY -POSTHOOK: Input: default@part +POSTHOOK: Input: default@part_orc #### A masked pattern was here #### Manufacturer#1 almond antique burnished rose metallic 1 1 2 4 4 Manufacturer#1 almond antique burnished rose metallic 1 1 2 4 4 Index: ql/src/test/results/clientpositive/windowing.q.out =================================================================== --- ql/src/test/results/clientpositive/windowing.q.out (revision 1637277) +++ ql/src/test/results/clientpositive/windowing.q.out (working copy) @@ -1,45 +1,3 @@ -PREHOOK: query: DROP TABLE part -PREHOOK: type: DROPTABLE -POSTHOOK: query: DROP TABLE part -POSTHOOK: type: DROPTABLE -PREHOOK: query: -- data setup -CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@part -POSTHOOK: query: -- data setup -CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@part -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part -PREHOOK: type: LOAD -#### A masked pattern was here #### -PREHOOK: Output: default@part -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part -POSTHOOK: type: LOAD -#### A masked pattern was here #### -POSTHOOK: Output: default@part PREHOOK: query: -- 1. 
testWindowing select p_mfgr, p_name, p_size, rank() over(distribute by p_mfgr sort by p_name) as r, Index: ql/src/test/results/clientpositive/windowing_adjust_rowcontainer_sz.q.out =================================================================== --- ql/src/test/results/clientpositive/windowing_adjust_rowcontainer_sz.q.out (revision 1637277) +++ ql/src/test/results/clientpositive/windowing_adjust_rowcontainer_sz.q.out (working copy) @@ -1,45 +1,3 @@ -PREHOOK: query: DROP TABLE part -PREHOOK: type: DROPTABLE -POSTHOOK: query: DROP TABLE part -POSTHOOK: type: DROPTABLE -PREHOOK: query: -- data setup -CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@part -POSTHOOK: query: -- data setup -CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@part -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part -PREHOOK: type: LOAD -#### A masked pattern was here #### -PREHOOK: Output: default@part -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part -POSTHOOK: type: LOAD -#### A masked pattern was here #### -POSTHOOK: Output: default@part PREHOOK: query: select p_mfgr, p_name, p_size, rank() over(distribute by p_mfgr sort by p_name) as r, dense_rank() over(distribute by p_mfgr sort by p_name) as dr, Index: ql/src/test/results/clientpositive/windowing_columnPruning.q.out =================================================================== --- ql/src/test/results/clientpositive/windowing_columnPruning.q.out (revision 1637277) +++ ql/src/test/results/clientpositive/windowing_columnPruning.q.out (working copy) @@ -1,45 +1,3 @@ -PREHOOK: query: DROP TABLE part -PREHOOK: type: DROPTABLE -POSTHOOK: query: DROP TABLE part -POSTHOOK: type: DROPTABLE -PREHOOK: query: -- data setup -CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@part -POSTHOOK: query: -- data setup -CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@part -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part -PREHOOK: type: LOAD -#### A masked pattern was here #### -PREHOOK: Output: default@part -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part -POSTHOOK: type: LOAD -#### A masked pattern was here #### -POSTHOOK: Output: default@part PREHOOK: query: -- 1. 
testQueryLevelPartitionColsNotInSelect select p_size, sum(p_retailprice) over (distribute by p_mfgr sort by p_name rows between unbounded preceding and current row) as s1 Index: ql/src/test/results/clientpositive/windowing_decimal.q.out =================================================================== --- ql/src/test/results/clientpositive/windowing_decimal.q.out (revision 1637277) +++ ql/src/test/results/clientpositive/windowing_decimal.q.out (working copy) @@ -1,45 +1,3 @@ -PREHOOK: query: DROP TABLE part -PREHOOK: type: DROPTABLE -POSTHOOK: query: DROP TABLE part -POSTHOOK: type: DROPTABLE -PREHOOK: query: -- data setup -CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@part -POSTHOOK: query: -- data setup -CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@part -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part -PREHOOK: type: LOAD -#### A masked pattern was here #### -PREHOOK: Output: default@part -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part -POSTHOOK: type: LOAD -#### A masked pattern was here #### -POSTHOOK: Output: default@part PREHOOK: query: create table part_dec( p_partkey INT, p_name STRING, Index: ql/src/test/results/clientpositive/windowing_expressions.q.out =================================================================== --- ql/src/test/results/clientpositive/windowing_expressions.q.out (revision 1637277) +++ ql/src/test/results/clientpositive/windowing_expressions.q.out (working copy) @@ -1,45 +1,3 @@ -PREHOOK: query: DROP TABLE part -PREHOOK: type: DROPTABLE -POSTHOOK: query: DROP TABLE part -POSTHOOK: type: DROPTABLE -PREHOOK: query: -- data setup -CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@part -POSTHOOK: query: -- data setup -CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@part -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part -PREHOOK: type: LOAD -#### A masked pattern was here #### -PREHOOK: Output: default@part -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part -POSTHOOK: type: LOAD -#### A masked pattern was here #### -POSTHOOK: Output: default@part PREHOOK: query: drop table over10k PREHOOK: type: DROPTABLE POSTHOOK: query: drop table over10k Index: ql/src/test/results/clientpositive/windowing_streaming.q.out =================================================================== --- ql/src/test/results/clientpositive/windowing_streaming.q.out (revision 1637277) +++ ql/src/test/results/clientpositive/windowing_streaming.q.out (working copy) @@ -1,45 +1,3 @@ -PREHOOK: 
query: DROP TABLE if exists part -PREHOOK: type: DROPTABLE -POSTHOOK: query: DROP TABLE if exists part -POSTHOOK: type: DROPTABLE -PREHOOK: query: -- data setup -CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@part -POSTHOOK: query: -- data setup -CREATE TABLE part( - p_partkey INT, - p_name STRING, - p_mfgr STRING, - p_brand STRING, - p_type STRING, - p_size INT, - p_container STRING, - p_retailprice DOUBLE, - p_comment STRING -) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@part -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part -PREHOOK: type: LOAD -#### A masked pattern was here #### -PREHOOK: Output: default@part -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part -POSTHOOK: type: LOAD -#### A masked pattern was here #### -POSTHOOK: Output: default@part PREHOOK: query: drop table over10k PREHOOK: type: DROPTABLE POSTHOOK: query: drop table over10k @@ -106,25 +64,25 @@ Map Operator Tree: TableScan alias: part - Statistics: Num rows: 15 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: p_mfgr (type: string), p_name (type: string) sort order: ++ Map-reduce partition columns: p_mfgr (type: string) - Statistics: Num rows: 15 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE value expressions: p_name (type: string), p_mfgr (type: string) Reduce Operator Tree: Extract - Statistics: Num rows: 15 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE PTF Operator - Statistics: Num rows: 15 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col2 (type: string), _wcol0 (type: int) outputColumnNames: _col0, _col1 - Statistics: Num rows: 15 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 15 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -156,29 +114,29 @@ Map Operator Tree: TableScan alias: part - Statistics: Num rows: 15 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: p_mfgr (type: string), p_name (type: string) sort order: ++ Map-reduce partition columns: p_mfgr (type: string) - Statistics: Num rows: 15 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE TopN Hash Memory Usage: 0.8 value expressions: p_name (type: string), p_mfgr (type: string) Reduce Operator Tree: Extract - Statistics: Num rows: 15 Data size: 
3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE PTF Operator - Statistics: Num rows: 15 Data size: 3173 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: (_wcol0 < 4) (type: boolean) - Statistics: Num rows: 5 Data size: 1057 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 8 Data size: 968 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col2 (type: string), _wcol0 (type: int) outputColumnNames: _col0, _col1 - Statistics: Num rows: 5 Data size: 1057 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 8 Data size: 968 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 5 Data size: 1057 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 8 Data size: 968 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat Index: serde/src/java/org/apache/hadoop/hive/serde2/avro/AvroSerDe.java =================================================================== --- serde/src/java/org/apache/hadoop/hive/serde2/avro/AvroSerDe.java (revision 1637277) +++ serde/src/java/org/apache/hadoop/hive/serde2/avro/AvroSerDe.java (working copy) @@ -92,10 +92,10 @@ final String columnNameProperty = properties.getProperty(serdeConstants.LIST_COLUMNS); final String columnTypeProperty = properties.getProperty(serdeConstants.LIST_COLUMN_TYPES); - final String columnCommentProperty = properties.getProperty(LIST_COLUMN_COMMENTS); + final String columnCommentProperty = properties.getProperty(LIST_COLUMN_COMMENTS,""); - if (properties.getProperty(AvroSerdeUtils.SCHEMA_LITERAL) != null - || properties.getProperty(AvroSerdeUtils.SCHEMA_URL) != null + if (properties.getProperty(AvroSerdeUtils.AvroTableProperties.SCHEMA_LITERAL.getPropName()) != null + || properties.getProperty(AvroSerdeUtils.AvroTableProperties.SCHEMA_URL.getPropName()) != null || columnNameProperty == null || columnNameProperty.isEmpty() || columnTypeProperty == null || columnTypeProperty.isEmpty()) { schema = determineSchemaOrReturnErrorSchema(properties); @@ -104,28 +104,8 @@ columnNames = Arrays.asList(columnNameProperty.split(",")); columnTypes = TypeInfoUtils.getTypeInfosFromTypeString(columnTypeProperty); - List columnComments; - if (columnCommentProperty.isEmpty()) { - columnComments = new ArrayList(); - } else { - columnComments = Arrays.asList(columnCommentProperty.split(",")); - LOG.info("columnComments is " + columnCommentProperty); - } - if (columnNames.size() != columnTypes.size()) { - throw new IllegalArgumentException("AvroSerde initialization failed. Number of column " + - "name and column type differs. 
columnNames = " + columnNames + ", columnTypes = " + - columnTypes); - } - - final String tableName = properties.getProperty(TABLE_NAME); - final String tableComment = properties.getProperty(TABLE_COMMENT); - TypeInfoToSchema typeInfoToSchema = new TypeInfoToSchema(); - schema = typeInfoToSchema.convert(columnNames, columnTypes, columnComments, - properties.getProperty(AvroSerdeUtils.SCHEMA_NAMESPACE), - properties.getProperty(AvroSerdeUtils.SCHEMA_NAME, tableName), - properties.getProperty(AvroSerdeUtils.SCHEMA_DOC, tableComment)); - - properties.setProperty(AvroSerdeUtils.SCHEMA_LITERAL, schema.toString()); + schema = getSchemaFromCols(properties, columnNames, columnTypes, columnCommentProperty); + properties.setProperty(AvroSerdeUtils.AvroTableProperties.SCHEMA_LITERAL.getPropName(), schema.toString()); } LOG.info("Avro schema is " + schema); @@ -133,7 +113,8 @@ if (configuration == null) { LOG.info("Configuration null, not inserting schema"); } else { - configuration.set(AvroSerdeUtils.AVRO_SERDE_SCHEMA, schema.toString(false)); + configuration.set( + AvroSerdeUtils.AvroTableProperties.AVRO_SERDE_SCHEMA.getPropName(), schema.toString(false)); } badSchema = schema.equals(SchemaResolutionProblem.SIGNAL_BAD_SCHEMA); @@ -144,6 +125,31 @@ this.oi = aoig.getObjectInspector(); } + public static Schema getSchemaFromCols(Properties properties, + List columnNames, List columnTypes, String columnCommentProperty) { + List columnComments; + if (columnCommentProperty == null || columnCommentProperty.isEmpty()) { + columnComments = new ArrayList(); + } else { + columnComments = Arrays.asList(columnCommentProperty.split(",")); + LOG.info("columnComments is " + columnCommentProperty); + } + if (columnNames.size() != columnTypes.size()) { + throw new IllegalArgumentException("AvroSerde initialization failed. Number of column " + + "name and column type differs. columnNames = " + columnNames + ", columnTypes = " + + columnTypes); + } + + final String tableName = properties.getProperty(TABLE_NAME); + final String tableComment = properties.getProperty(TABLE_COMMENT); + TypeInfoToSchema typeInfoToSchema = new TypeInfoToSchema(); + return typeInfoToSchema.convert(columnNames, columnTypes, columnComments, + properties.getProperty(AvroSerdeUtils.AvroTableProperties.SCHEMA_NAMESPACE.getPropName()), + properties.getProperty(AvroSerdeUtils.AvroTableProperties.SCHEMA_NAME.getPropName(), tableName), + properties.getProperty(AvroSerdeUtils.AvroTableProperties.SCHEMA_DOC.getPropName(), tableComment)); + + } + /** * Attempt to determine the schema via the usual means, but do not throw * an exception if we fail. Instead, signal failure via a special Index: serde/src/java/org/apache/hadoop/hive/serde2/avro/AvroSerdeUtils.java =================================================================== --- serde/src/java/org/apache/hadoop/hive/serde2/avro/AvroSerdeUtils.java (revision 1637277) +++ serde/src/java/org/apache/hadoop/hive/serde2/avro/AvroSerdeUtils.java (working copy) @@ -48,17 +48,48 @@ public class AvroSerdeUtils { private static final Log LOG = LogFactory.getLog(AvroSerdeUtils.class); - public static final String SCHEMA_LITERAL = "avro.schema.literal"; - public static final String SCHEMA_URL = "avro.schema.url"; + /** + * Enum container for all avro table properties. + * If introducing a new avro-specific table property, + * add it here. Putting them in an enum rather than separate strings + * allows them to be programmatically grouped and referenced together. 
+ */ + public static enum AvroTableProperties { + SCHEMA_LITERAL("avro.schema.literal"), + SCHEMA_URL("avro.schema.url"), + SCHEMA_NAMESPACE("avro.schema.namespace"), + SCHEMA_NAME("avro.schema.name"), + SCHEMA_DOC("avro.schema.doc"), + AVRO_SERDE_SCHEMA("avro.serde.schema"), + SCHEMA_RETRIEVER("avro.schema.retriever"); + + private final String propName; + + AvroTableProperties(String propName) { + this.propName = propName; + } + + public String getPropName() { + return this.propName; + } + } + + // The following parameters are slated for removal; prefer the enum above, which allows programmatic access. + @Deprecated public static final String SCHEMA_LITERAL = "avro.schema.literal"; + @Deprecated public static final String SCHEMA_URL = "avro.schema.url"; + @Deprecated public static final String SCHEMA_NAMESPACE = "avro.schema.namespace"; + @Deprecated public static final String SCHEMA_NAME = "avro.schema.name"; + @Deprecated public static final String SCHEMA_DOC = "avro.schema.doc"; + @Deprecated public static final String AVRO_SERDE_SCHEMA = AvroTableProperties.AVRO_SERDE_SCHEMA.getPropName(); + @Deprecated public static final String SCHEMA_RETRIEVER = AvroTableProperties.SCHEMA_RETRIEVER.getPropName(); + public static final String SCHEMA_NONE = "none"; - public static final String SCHEMA_NAMESPACE = "avro.schema.namespace"; - public static final String SCHEMA_NAME = "avro.schema.name"; - public static final String SCHEMA_DOC = "avro.schema.doc"; - public static final String EXCEPTION_MESSAGE = "Neither " + SCHEMA_LITERAL + " nor " - + SCHEMA_URL + " specified, can't determine table schema"; - public static final String AVRO_SERDE_SCHEMA = "avro.serde.schema"; - public static final String SCHEMA_RETRIEVER = "avro.schema.retriever"; + public static final String EXCEPTION_MESSAGE = "Neither " + + AvroTableProperties.SCHEMA_LITERAL.getPropName() + " nor " + + AvroTableProperties.SCHEMA_URL.getPropName() + " specified, can't determine table schema"; + + /** * Determine the schema that's been provided for Avro serde work.
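The AvroTableProperties enum above becomes the single place where Avro-specific table property names are declared; the old string constants survive only as deprecated aliases. A minimal usage sketch, not part of the patch itself (the class name and schema literal are made up), showing lookups through the enum:

import java.util.Properties;

import org.apache.hadoop.hive.serde2.avro.AvroSerdeUtils;

// Illustrative sketch: resolve Avro table properties via the new enum
// instead of the deprecated AvroSerdeUtils.SCHEMA_LITERAL string constant.
public class AvroTablePropertiesExample {
  public static void main(String[] args) {
    Properties tblProps = new Properties();
    tblProps.setProperty(
        AvroSerdeUtils.AvroTableProperties.SCHEMA_LITERAL.getPropName(),
        "{\"type\":\"record\",\"name\":\"example\",\"fields\":[]}");
    // Because the properties are an enum, they can be iterated as a group.
    for (AvroSerdeUtils.AvroTableProperties p
        : AvroSerdeUtils.AvroTableProperties.values()) {
      System.out.println(p.getPropName() + " = "
          + tblProps.getProperty(p.getPropName(), "<unset>"));
    }
  }
}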
* @param properties containing a key pointing to the schema, one way or another @@ -68,12 +99,12 @@ */ public static Schema determineSchemaOrThrowException(Properties properties) throws IOException, AvroSerdeException { - String schemaString = properties.getProperty(SCHEMA_LITERAL); + String schemaString = properties.getProperty(AvroTableProperties.SCHEMA_LITERAL.getPropName()); if(schemaString != null && !schemaString.equals(SCHEMA_NONE)) return AvroSerdeUtils.getSchemaFor(schemaString); // Try pulling directly from URL - schemaString = properties.getProperty(SCHEMA_URL); + schemaString = properties.getProperty(AvroTableProperties.SCHEMA_URL.getPropName()); if(schemaString == null || schemaString.equals(SCHEMA_NONE)) throw new AvroSerdeException(EXCEPTION_MESSAGE); Index: serde/src/java/org/apache/hadoop/hive/serde2/io/HiveDecimalWritable.java =================================================================== --- serde/src/java/org/apache/hadoop/hive/serde2/io/HiveDecimalWritable.java (revision 1637277) +++ serde/src/java/org/apache/hadoop/hive/serde2/io/HiveDecimalWritable.java (working copy) @@ -62,6 +62,10 @@ set(value.unscaledValue().toByteArray(), value.scale()); } + public void set(HiveDecimal value, int maxPrecision, int maxScale) { + set(HiveDecimal.enforcePrecisionScale(value, maxPrecision, maxScale)); + } + public void set(HiveDecimalWritable writable) { set(writable.getHiveDecimal()); } Index: serde/src/test/org/apache/hadoop/hive/serde2/avro/TestTypeInfoToSchema.java =================================================================== --- serde/src/test/org/apache/hadoop/hive/serde2/avro/TestTypeInfoToSchema.java (revision 1637277) +++ serde/src/test/org/apache/hadoop/hive/serde2/avro/TestTypeInfoToSchema.java (working copy) @@ -80,6 +80,7 @@ private TypeInfoToSchema typeInfoToSchema; + private final String lineSeparator = System.getProperty("line.separator"); private String getAvroSchemaString(TypeInfo columnType) { return typeInfoToSchema.convert( @@ -383,7 +384,7 @@ LOGGER.info("structTypeInfo is " + structTypeInfo); final String specificSchema = IOUtils.toString(Resources.getResource("avro-struct.avsc") - .openStream()).replace("\n", ""); + .openStream()).replace(lineSeparator, ""); String expectedSchema = genSchema( specificSchema); @@ -414,7 +415,7 @@ superStructTypeInfo.setAllStructFieldTypeInfos(superTypeInfos); final String specificSchema = IOUtils.toString(Resources.getResource("avro-nested-struct.avsc") - .openStream()).replace("\n", ""); + .openStream()).replace(lineSeparator, ""); String expectedSchema = genSchema( specificSchema); Assert.assertEquals("Test for nested struct's avro schema failed", Index: service/pom.xml =================================================================== --- service/pom.xml (revision 1637277) +++ service/pom.xml (working copy) @@ -86,6 +86,11 @@ libthrift ${libthrift.version} + + org.apache.curator + curator-framework + ${curator.version} + org.apache.hive Index: service/src/java/org/apache/hive/service/auth/HiveAuthFactory.java =================================================================== --- service/src/java/org/apache/hive/service/auth/HiveAuthFactory.java (revision 1637277) +++ service/src/java/org/apache/hive/service/auth/HiveAuthFactory.java (working copy) @@ -21,9 +21,13 @@ import java.net.InetAddress; import java.net.InetSocketAddress; import java.net.UnknownHostException; +import java.util.ArrayList; +import java.util.Arrays; import java.util.HashMap; +import java.util.List; import java.util.Map; +import 
javax.net.ssl.SSLServerSocket; import javax.security.auth.login.LoginException; import javax.security.sasl.Sasl; @@ -43,13 +47,17 @@ import org.apache.thrift.transport.TTransport; import org.apache.thrift.transport.TTransportException; import org.apache.thrift.transport.TTransportFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * This class helps in some aspects of authentication. It creates the proper Thrift classes for the * given configuration as well as helps with authenticating requests. */ public class HiveAuthFactory { + private static final Logger LOG = LoggerFactory.getLogger(HiveAuthFactory.class); + public enum AuthTypes { NOSASL("NOSASL"), NONE("NONE"), @@ -218,7 +226,8 @@ } public static TServerSocket getServerSSLSocket(String hiveHost, int portNum, String keyStorePath, - String keyStorePassWord) throws TTransportException, UnknownHostException { + String keyStorePassWord, List sslVersionBlacklist) + throws TTransportException, UnknownHostException { TSSLTransportFactory.TSSLTransportParameters params = new TSSLTransportFactory.TSSLTransportParameters(); params.setKeyStore(keyStorePath, keyStorePassWord); @@ -229,7 +238,25 @@ } else { serverAddress = InetAddress.getByName(hiveHost); } - return TSSLTransportFactory.getServerSocket(portNum, 0, serverAddress, params); + TServerSocket thriftServerSocket = TSSLTransportFactory.getServerSocket(portNum, 0, serverAddress, params); + if (thriftServerSocket.getServerSocket() instanceof SSLServerSocket) { + List sslVersionBlacklistLocal = new ArrayList(); + for (String sslVersion : sslVersionBlacklist) { + sslVersionBlacklistLocal.add(sslVersion.trim().toLowerCase()); + } + SSLServerSocket sslServerSocket = (SSLServerSocket)thriftServerSocket.getServerSocket(); + List enabledProtocols = new ArrayList(); + for (String protocol : sslServerSocket.getEnabledProtocols()) { + if (sslVersionBlacklistLocal.contains(protocol.toLowerCase())) { + LOG.debug("Disabling SSL Protocol: " + protocol); + } else { + enabledProtocols.add(protocol); + } + } + sslServerSocket.setEnabledProtocols(enabledProtocols.toArray(new String[0])); + LOG.info("SSL Server Socket Enabled Protocols: " + Arrays.toString(sslServerSocket.getEnabledProtocols())); + } + return thriftServerSocket; } // retrieve delegation token for the given user Index: service/src/java/org/apache/hive/service/auth/HttpAuthUtils.java =================================================================== --- service/src/java/org/apache/hive/service/auth/HttpAuthUtils.java (revision 1637277) +++ service/src/java/org/apache/hive/service/auth/HttpAuthUtils.java (working copy) @@ -18,16 +18,18 @@ package org.apache.hive.service.auth; -import java.io.IOException; +import java.security.AccessControlContext; +import java.security.AccessController; import java.security.PrivilegedExceptionAction; +import javax.security.auth.Subject; + import org.apache.commons.codec.binary.Base64; import org.apache.hadoop.hive.shims.ShimLoader; import org.apache.hadoop.security.UserGroupInformation; import org.apache.http.protocol.BasicHttpContext; import org.apache.http.protocol.HttpContext; import org.ietf.jgss.GSSContext; -import org.ietf.jgss.GSSCredential; import org.ietf.jgss.GSSManager; import org.ietf.jgss.GSSName; import org.ietf.jgss.Oid; @@ -36,60 +38,54 @@ * Utility functions for HTTP mode authentication. 
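getServerSSLSocket above drops every blacklisted protocol name, compared case-insensitively, from the set of protocols the JVM enabled on the Thrift server socket. A self-contained sketch of the same filtering against a plain JDK SSLServerSocket, assuming the default value of hive.ssl.protocol.blacklist (the port and class name are made up):

import java.util.ArrayList;
import java.util.List;

import javax.net.ssl.SSLServerSocket;
import javax.net.ssl.SSLServerSocketFactory;

// Illustrative sketch: mirrors the blacklist filtering applied in
// HiveAuthFactory.getServerSSLSocket, using only JDK classes.
public class SslProtocolBlacklistExample {
  public static void main(String[] args) throws Exception {
    List<String> blacklist = new ArrayList<String>();
    for (String v : "SSLv2,SSLv2Hello,SSLv3".split(",")) {
      blacklist.add(v.trim().toLowerCase());
    }
    SSLServerSocket socket = (SSLServerSocket)
        SSLServerSocketFactory.getDefault().createServerSocket(0);
    List<String> enabled = new ArrayList<String>();
    for (String protocol : socket.getEnabledProtocols()) {
      if (!blacklist.contains(protocol.toLowerCase())) {
        enabled.add(protocol);  // keep everything that is not blacklisted
      }
    }
    socket.setEnabledProtocols(enabled.toArray(new String[0]));
    socket.close();
  }
}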
*/ public final class HttpAuthUtils { - public static final String WWW_AUTHENTICATE = "WWW-Authenticate"; public static final String AUTHORIZATION = "Authorization"; public static final String BASIC = "Basic"; public static final String NEGOTIATE = "Negotiate"; - + /** * @return Stringified Base64 encoded kerberosAuthHeader on success + * @throws Exception */ - public static String getKerberosServiceTicket(String principal, String host, String serverHttpUrl) - throws IOException, InterruptedException { - UserGroupInformation clientUGI = getClientUGI("kerberos"); - String serverPrincipal = getServerPrincipal(principal, host); - // Uses the Ticket Granting Ticket in the UserGroupInformation - return clientUGI.doAs( - new HttpKerberosClientAction(serverPrincipal, clientUGI.getUserName(), serverHttpUrl)); + public static String getKerberosServiceTicket(String principal, String host, + String serverHttpUrl, boolean assumeSubject) throws Exception { + String serverPrincipal = + ShimLoader.getHadoopThriftAuthBridge().getServerPrincipal(principal, host); + if (assumeSubject) { + // With this option, we're assuming that the external application, + // using the JDBC driver has done a JAAS kerberos login already + AccessControlContext context = AccessController.getContext(); + Subject subject = Subject.getSubject(context); + if (subject == null) { + throw new Exception("The Subject is not set"); + } + return Subject.doAs(subject, new HttpKerberosClientAction(serverPrincipal, serverHttpUrl)); + } else { + // JAAS login from ticket cache to setup the client UserGroupInformation + UserGroupInformation clientUGI = + ShimLoader.getHadoopThriftAuthBridge().getCurrentUGIWithConf("kerberos"); + return clientUGI.doAs(new HttpKerberosClientAction(serverPrincipal, serverHttpUrl)); + } } - /** - * Get server principal and verify that hostname is present. - */ - private static String getServerPrincipal(String principal, String host) throws IOException { - return ShimLoader.getHadoopThriftAuthBridge().getServerPrincipal(principal, host); + private HttpAuthUtils() { + throw new UnsupportedOperationException("Can't initialize class"); } /** - * JAAS login to setup the client UserGroupInformation. - * Sets up the Kerberos Ticket Granting Ticket, - * in the client UserGroupInformation object. 
- * - * @return Client's UserGroupInformation + * We'll create an instance of this class within a doAs block so that the client's TGT credentials + * can be read from the Subject */ - public static UserGroupInformation getClientUGI(String authType) throws IOException { - return ShimLoader.getHadoopThriftAuthBridge().getCurrentUGIWithConf(authType); - } - - private HttpAuthUtils() { - throw new UnsupportedOperationException("Can't initialize class"); - } - public static class HttpKerberosClientAction implements PrivilegedExceptionAction { - public static final String HTTP_RESPONSE = "HTTP_RESPONSE"; public static final String SERVER_HTTP_URL = "SERVER_HTTP_URL"; private final String serverPrincipal; - private final String clientUserName; private final String serverHttpUrl; private final Base64 base64codec; private final HttpContext httpContext; - public HttpKerberosClientAction(String serverPrincipal, String clientUserName, - String serverHttpUrl) { + public HttpKerberosClientAction(String serverPrincipal, String serverHttpUrl) { this.serverPrincipal = serverPrincipal; - this.clientUserName = clientUserName; this.serverHttpUrl = serverHttpUrl; base64codec = new Base64(0); httpContext = new BasicHttpContext(); @@ -102,38 +98,17 @@ Oid mechOid = new Oid("1.2.840.113554.1.2.2"); // Oid for kerberos principal name Oid krb5PrincipalOid = new Oid("1.2.840.113554.1.2.2.1"); - GSSManager manager = GSSManager.getInstance(); - - // GSS name for client - GSSName clientName = manager.createName(clientUserName, GSSName.NT_USER_NAME); // GSS name for server GSSName serverName = manager.createName(serverPrincipal, krb5PrincipalOid); - - // GSS credentials for client - GSSCredential clientCreds = - manager.createCredential(clientName, GSSCredential.DEFAULT_LIFETIME, mechOid, - GSSCredential.INITIATE_ONLY); - - /* - * Create a GSSContext for mutual authentication with the - * server. - * - serverName is the GSSName that represents the server. - * - krb5Oid is the Oid that represents the mechanism to - * use. The client chooses the mechanism to use. - * - clientCreds are the client credentials - */ + // Create a GSSContext for authentication with the service. + // We're passing client credentials as null since we want them to be read from the Subject. 
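With assumeSubject set, the driver performs no JAAS login of its own; it expects the calling application to have authenticated already and reads the Kerberos credentials off the Subject attached to the current access-control context. A sketch of such an external caller, assuming a JAAS configuration file with an entry named KerberosClient (that entry name and the class are made up):

import java.security.PrivilegedExceptionAction;

import javax.security.auth.Subject;
import javax.security.auth.login.LoginContext;

// Illustrative sketch: an external application does its own JAAS Kerberos
// login and runs its HiveServer2 work inside Subject.doAs, which is the
// situation the assumeSubject branch above is designed for.
public class SubjectDoAsExample {
  public static void main(String[] args) throws Exception {
    LoginContext login = new LoginContext("KerberosClient");  // assumed JAAS entry
    login.login();
    Subject subject = login.getSubject();
    Subject.doAs(subject, new PrivilegedExceptionAction<Void>() {
      @Override
      public Void run() throws Exception {
        // Open the JDBC connection to HiveServer2 here; the driver reads
        // the TGT from this Subject instead of doing a login of its own.
        return null;
      }
    });
  }
}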
GSSContext gssContext = - manager.createContext(serverName, mechOid, clientCreds, GSSContext.DEFAULT_LIFETIME); - - // Mutual authentication not r + manager.createContext(serverName, mechOid, null, GSSContext.DEFAULT_LIFETIME); gssContext.requestMutualAuth(false); - // Establish context byte[] inToken = new byte[0]; - byte[] outToken = gssContext.initSecContext(inToken, 0, inToken.length); - gssContext.dispose(); // Base64 encoded and stringified token for server return new String(base64codec.encode(outToken)); Index: service/src/java/org/apache/hive/service/cli/operation/OperationLog.java =================================================================== --- service/src/java/org/apache/hive/service/cli/operation/OperationLog.java (revision 1637277) +++ service/src/java/org/apache/hive/service/cli/operation/OperationLog.java (working copy) @@ -128,6 +128,12 @@ void remove() { try { + if (in != null) { + in.close(); + } + if (out != null) { + out.close(); + } FileUtils.forceDelete(file); isRemoved = true; } catch (Exception e) { Index: service/src/java/org/apache/hive/service/cli/thrift/ThriftBinaryCLIService.java =================================================================== --- service/src/java/org/apache/hive/service/cli/thrift/ThriftBinaryCLIService.java (revision 1637277) +++ service/src/java/org/apache/hive/service/cli/thrift/ThriftBinaryCLIService.java (working copy) @@ -18,6 +18,8 @@ package org.apache.hive.service.cli.thrift; +import java.util.ArrayList; +import java.util.List; import java.util.concurrent.ExecutorService; import java.util.concurrent.SynchronousQueue; import java.util.concurrent.ThreadPoolExecutor; @@ -56,6 +58,10 @@ TTransportFactory transportFactory = hiveAuthFactory.getAuthTransFactory(); TProcessorFactory processorFactory = hiveAuthFactory.getAuthProcFactory(this); TServerSocket serverSocket = null; + List sslVersionBlacklist = new ArrayList(); + for (String sslVersion : hiveConf.getVar(ConfVars.HIVE_SSL_PROTOCOL_BLACKLIST).split(",")) { + sslVersionBlacklist.add(sslVersion); + } if (!hiveConf.getBoolVar(ConfVars.HIVE_SERVER2_USE_SSL)) { serverSocket = HiveAuthFactory.getServerSocket(hiveHost, portNum); } else { @@ -67,13 +73,16 @@ String keyStorePassword = ShimLoader.getHadoopShims().getPassword(hiveConf, HiveConf.ConfVars.HIVE_SERVER2_SSL_KEYSTORE_PASSWORD.varname); serverSocket = HiveAuthFactory.getServerSSLSocket(hiveHost, portNum, keyStorePath, - keyStorePassword); + keyStorePassword, sslVersionBlacklist); } // Server args + int maxMessageSize = hiveConf.getIntVar(HiveConf.ConfVars.HIVE_SERVER2_THRIFT_MAX_MESSAGE_SIZE); TThreadPoolServer.Args sargs = new TThreadPoolServer.Args(serverSocket) .processorFactory(processorFactory).transportFactory(transportFactory) - .protocolFactory(new TBinaryProtocol.Factory()).executorService(executorService); + .protocolFactory(new TBinaryProtocol.Factory()) + .inputProtocolFactory(new TBinaryProtocol.Factory(true, true, maxMessageSize)) + .executorService(executorService); // TCP Server server = new TThreadPoolServer(sargs); Index: service/src/java/org/apache/hive/service/cli/thrift/ThriftCLIService.java =================================================================== --- service/src/java/org/apache/hive/service/cli/thrift/ThriftCLIService.java (revision 1637277) +++ service/src/java/org/apache/hive/service/cli/thrift/ThriftCLIService.java (working copy) @@ -19,7 +19,8 @@ package org.apache.hive.service.cli.thrift; import java.io.IOException; -import java.net.InetSocketAddress; +import java.net.InetAddress; 
+import java.net.UnknownHostException; import java.util.HashMap; import java.util.Map; import java.util.concurrent.TimeUnit; @@ -31,6 +32,7 @@ import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hive.service.AbstractService; +import org.apache.hive.service.ServiceException; import org.apache.hive.service.auth.HiveAuthFactory; import org.apache.hive.service.auth.TSetIpAddressProcessor; import org.apache.hive.service.cli.*; @@ -53,7 +55,7 @@ protected static HiveAuthFactory hiveAuthFactory; protected int portNum; - protected InetSocketAddress serverAddress; + protected InetAddress serverAddress; protected String hiveHost; protected TServer server; protected org.eclipse.jetty.server.Server httpServer; @@ -75,13 +77,21 @@ @Override public synchronized void init(HiveConf hiveConf) { this.hiveConf = hiveConf; - // Initialize common server configs needed in both binary & http modes String portString; hiveHost = System.getenv("HIVE_SERVER2_THRIFT_BIND_HOST"); if (hiveHost == null) { hiveHost = hiveConf.getVar(ConfVars.HIVE_SERVER2_THRIFT_BIND_HOST); } + try { + if (hiveHost != null && !hiveHost.isEmpty()) { + serverAddress = InetAddress.getByName(hiveHost); + } else { + serverAddress = InetAddress.getLocalHost(); + } + } catch (UnknownHostException e) { + throw new ServiceException(e); + } // HTTP mode if (HiveServer2.isHTTPTransportMode(hiveConf)) { workerKeepAliveTime = @@ -105,11 +115,6 @@ portNum = hiveConf.getIntVar(ConfVars.HIVE_SERVER2_THRIFT_PORT); } } - if (hiveHost != null && !hiveHost.isEmpty()) { - serverAddress = new InetSocketAddress(hiveHost, portNum); - } else { - serverAddress = new InetSocketAddress(portNum); - } minWorkerThreads = hiveConf.getIntVar(ConfVars.HIVE_SERVER2_THRIFT_MIN_WORKER_THREADS); maxWorkerThreads = hiveConf.getIntVar(ConfVars.HIVE_SERVER2_THRIFT_MAX_WORKER_THREADS); super.init(hiveConf); @@ -148,7 +153,7 @@ return portNum; } - public InetSocketAddress getServerAddress() { + public InetAddress getServerAddress() { return serverAddress; } Index: service/src/java/org/apache/hive/service/cli/thrift/ThriftHttpCLIService.java =================================================================== --- service/src/java/org/apache/hive/service/cli/thrift/ThriftHttpCLIService.java (revision 1637277) +++ service/src/java/org/apache/hive/service/cli/thrift/ThriftHttpCLIService.java (working copy) @@ -18,6 +18,7 @@ package org.apache.hive.service.cli.thrift; +import java.util.Arrays; import java.util.concurrent.ExecutorService; import java.util.concurrent.LinkedBlockingQueue; import java.util.concurrent.ThreadPoolExecutor; @@ -83,6 +84,11 @@ + " Not configured for SSL connection"); } SslContextFactory sslContextFactory = new SslContextFactory(); + String[] excludedProtocols = hiveConf.getVar(ConfVars.HIVE_SSL_PROTOCOL_BLACKLIST).split(","); + LOG.info("HTTP Server SSL: adding excluded protocols: " + Arrays.toString(excludedProtocols)); + sslContextFactory.addExcludeProtocols(excludedProtocols); + LOG.info("HTTP Server SSL: SslContextFactory.getExcludeProtocols = " + + Arrays.toString(sslContextFactory.getExcludeProtocols())); sslContextFactory.setKeyStorePath(keyStorePath); sslContextFactory.setKeyStorePassword(keyStorePassword); connector = new SslSelectChannelConnector(sslContextFactory); Index: service/src/java/org/apache/hive/service/cli/thrift/ThriftHttpServlet.java =================================================================== --- 
service/src/java/org/apache/hive/service/cli/thrift/ThriftHttpServlet.java (revision 1637277) +++ service/src/java/org/apache/hive/service/cli/thrift/ThriftHttpServlet.java (working copy) @@ -208,11 +208,9 @@ // Create a GSS context gssContext = manager.createContext(serverCreds); - // Get service ticket from the authorization header String serviceTicketBase64 = getAuthHeader(request, authType); byte[] inToken = Base64.decodeBase64(serviceTicketBase64.getBytes()); - gssContext.acceptSecContext(inToken, 0, inToken.length); // Authenticate or deny based on its context completion if (!gssContext.isEstablished()) { Index: service/src/java/org/apache/hive/service/server/HiveServer2.java =================================================================== --- service/src/java/org/apache/hive/service/server/HiveServer2.java (revision 1637277) +++ service/src/java/org/apache/hive/service/server/HiveServer2.java (working copy) @@ -32,6 +32,10 @@ import org.apache.commons.cli.ParseException; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.curator.framework.CuratorFramework; +import org.apache.curator.framework.CuratorFrameworkFactory; +import org.apache.curator.framework.api.ACLProvider; +import org.apache.curator.retry.ExponentialBackoffRetry; import org.apache.hadoop.hive.common.LogUtils; import org.apache.hadoop.hive.common.LogUtils.LogInitializationException; import org.apache.hadoop.hive.conf.HiveConf; @@ -53,7 +57,6 @@ import org.apache.zookeeper.Watcher; import org.apache.zookeeper.ZooDefs.Ids; import org.apache.zookeeper.ZooDefs.Perms; -import org.apache.zookeeper.ZooKeeper; import org.apache.zookeeper.data.ACL; /** @@ -66,7 +69,7 @@ private CLIService cliService; private ThriftCLIService thriftCLIService; private String znodePath; - private ZooKeeper zooKeeperClient; + private CuratorFramework zooKeeperClient; private boolean registeredWithZooKeeper = false; public HiveServer2() { @@ -74,7 +77,6 @@ HiveConf.setLoadHiveServer2Config(true); } - @Override public synchronized void init(HiveConf hiveConf) { cliService = new CLIService(this); @@ -109,36 +111,60 @@ } /** + * ACLProvider for providing appropriate ACLs to CuratorFrameworkFactory + */ + private final ACLProvider zooKeeperAclProvider = new ACLProvider() { + List nodeAcls = new ArrayList(); + + @Override + public List getDefaultAcl() { + if (ShimLoader.getHadoopShims().isSecurityEnabled()) { + // Read all to the world + nodeAcls.addAll(Ids.READ_ACL_UNSAFE); + // Create/Delete/Write/Admin to the authenticated user + nodeAcls.add(new ACL(Perms.ALL, Ids.AUTH_IDS)); + } else { + // ACLs for znodes on a non-kerberized cluster + // Create/Read/Delete/Write/Admin to the world + nodeAcls.addAll(Ids.OPEN_ACL_UNSAFE); + } + return nodeAcls; + } + + @Override + public List getAclForPath(String path) { + return getDefaultAcl(); + } + }; + + /** * Adds a server instance to ZooKeeper as a znode. 
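The zooKeeperAclProvider above hands Curator the ACLs to stamp on every znode it creates: world-readable plus full rights for the authenticated user on secure clusters, fully open otherwise. A sketch of wiring an ACLProvider into a Curator client the same way (the connect string and class name are made up; it uses open ACLs for brevity):

import java.util.List;

import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.CuratorFrameworkFactory;
import org.apache.curator.framework.api.ACLProvider;
import org.apache.curator.retry.ExponentialBackoffRetry;
import org.apache.zookeeper.ZooDefs.Ids;
import org.apache.zookeeper.data.ACL;

// Illustrative sketch: a minimal ACLProvider plugged into
// CuratorFrameworkFactory, as HiveServer2 does above.
public class AclProviderExample {
  public static void main(String[] args) {
    ACLProvider openAcls = new ACLProvider() {
      @Override
      public List<ACL> getDefaultAcl() {
        return Ids.OPEN_ACL_UNSAFE;  // world: create/read/write/delete/admin
      }
      @Override
      public List<ACL> getAclForPath(String path) {
        return getDefaultAcl();
      }
    };
    CuratorFramework client = CuratorFrameworkFactory.builder()
        .connectString("localhost:2181")  // assumed local ZooKeeper
        .aclProvider(openAcls)
        .retryPolicy(new ExponentialBackoffRetry(1000, 3))
        .build();
    client.start();
    client.close();
  }
}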
* * @param hiveConf * @throws Exception */ private void addServerInstanceToZooKeeper(HiveConf hiveConf) throws Exception { - int zooKeeperSessionTimeout = - hiveConf.getIntVar(HiveConf.ConfVars.HIVE_ZOOKEEPER_SESSION_TIMEOUT); String zooKeeperEnsemble = ZooKeeperHiveHelper.getQuorumServers(hiveConf); String rootNamespace = hiveConf.getVar(HiveConf.ConfVars.HIVE_SERVER2_ZOOKEEPER_NAMESPACE); String instanceURI = getServerInstanceURI(hiveConf); byte[] znodeDataUTF8 = instanceURI.getBytes(Charset.forName("UTF-8")); - // Znode ACLs - List nodeAcls = new ArrayList(); - setUpAuthAndAcls(hiveConf, nodeAcls); - // Create a ZooKeeper client + setUpZooKeeperAuth(hiveConf); + // Create a CuratorFramework instance to be used as the ZooKeeper client + // Use the zooKeeperAclProvider to create appropriate ACLs zooKeeperClient = - new ZooKeeper(zooKeeperEnsemble, zooKeeperSessionTimeout, - new ZooKeeperHiveHelper.DummyWatcher()); + CuratorFrameworkFactory.builder().connectString(zooKeeperEnsemble) + .aclProvider(zooKeeperAclProvider).retryPolicy(new ExponentialBackoffRetry(1000, 3)) + .build(); + zooKeeperClient.start(); // Create the parent znodes recursively; ignore if the parent already exists. - // If pre-creating the parent on a kerberized cluster, ensure that you give ACLs, - // as explained in {@link #setUpAuthAndAcls(HiveConf, List) setUpAuthAndAcls} try { - ZooKeeperHiveHelper.createPathRecursively(zooKeeperClient, rootNamespace, nodeAcls, - CreateMode.PERSISTENT); + zooKeeperClient.create().creatingParentsIfNeeded().withMode(CreateMode.PERSISTENT) + .forPath(ZooKeeperHiveHelper.ZOOKEEPER_PATH_SEPARATOR + rootNamespace); LOG.info("Created the root name space: " + rootNamespace + " on ZooKeeper for HiveServer2"); } catch (KeeperException e) { if (e.code() != KeeperException.Code.NODEEXISTS) { LOG.fatal("Unable to create HiveServer2 namespace: " + rootNamespace + " on ZooKeeper", e); - throw (e); + throw e; } } // Create a znode under the rootNamespace parent for this instance of the server @@ -149,56 +175,40 @@ + ZooKeeperHiveHelper.ZOOKEEPER_PATH_SEPARATOR + "serverUri=" + instanceURI + ";" + "version=" + HiveVersionInfo.getVersion() + ";" + "sequence="; znodePath = - zooKeeperClient.create(pathPrefix, znodeDataUTF8, nodeAcls, - CreateMode.EPHEMERAL_SEQUENTIAL); + zooKeeperClient.create().creatingParentsIfNeeded() + .withMode(CreateMode.EPHEMERAL_SEQUENTIAL).forPath(pathPrefix, znodeDataUTF8); setRegisteredWithZooKeeper(true); // Set a watch on the znode - if (zooKeeperClient.exists(znodePath, new DeRegisterWatcher()) == null) { + if (zooKeeperClient.checkExists().usingWatcher(new DeRegisterWatcher()).forPath(znodePath) == null) { // No node exists, throw exception throw new Exception("Unable to create znode for this HiveServer2 instance on ZooKeeper."); } LOG.info("Created a znode on ZooKeeper for HiveServer2 uri: " + instanceURI); } catch (KeeperException e) { LOG.fatal("Unable to create a znode for this server instance", e); - throw new Exception(e); + throw (e); } } /** - * Set up ACLs for znodes based on whether the cluster is secure or not. - * On a kerberized cluster, ZooKeeper performs Kerberos-SASL authentication. - * We give Read privilege to the world, but Create/Delete/Write/Admin to the authenticated user. - * On a non-kerberized cluster, we give Create/Read/Delete/Write/Admin privileges to the world. + * For a kerberized cluster, we dynamically set up the client's JAAS conf. * - * For a kerberized cluster, we also dynamically set up the client's JAAS conf. 
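Registration above is a two-step Curator sequence: a persistent parent znode for the root namespace, then an ephemeral-sequential child carrying the server URI as its payload. A compressed sketch of that create chain (the connect string, namespace, and payload are made up):

import java.nio.charset.Charset;

import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.CuratorFrameworkFactory;
import org.apache.curator.retry.ExponentialBackoffRetry;
import org.apache.zookeeper.CreateMode;

// Illustrative sketch: the create()/creatingParentsIfNeeded()/forPath()
// chain used by addServerInstanceToZooKeeper above.
public class ZnodeRegistrationExample {
  public static void main(String[] args) throws Exception {
    CuratorFramework client = CuratorFrameworkFactory.builder()
        .connectString("localhost:2181")  // assumed ZooKeeper ensemble
        .retryPolicy(new ExponentialBackoffRetry(1000, 3))
        .build();
    client.start();
    byte[] data = "serverUri=host:10000;version=x;sequence="
        .getBytes(Charset.forName("UTF-8"));
    // ZooKeeper appends a monotonically increasing suffix to the path and
    // deletes the znode automatically when this client's session ends.
    String znodePath = client.create().creatingParentsIfNeeded()
        .withMode(CreateMode.EPHEMERAL_SEQUENTIAL)
        .forPath("/hiveserver2-example/serverUri=host:10000;sequence=", data);
    System.out.println("Registered at " + znodePath);
    client.close();
  }
}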
* @param hiveConf - * @param nodeAcls * @return * @throws Exception */ - private void setUpAuthAndAcls(HiveConf hiveConf, List nodeAcls) throws Exception { + private void setUpZooKeeperAuth(HiveConf hiveConf) throws Exception { if (ShimLoader.getHadoopShims().isSecurityEnabled()) { String principal = hiveConf.getVar(ConfVars.HIVE_SERVER2_KERBEROS_PRINCIPAL); if (principal.isEmpty()) { - throw new IOException( - "HiveServer2 Kerberos principal is empty"); + throw new IOException("HiveServer2 Kerberos principal is empty"); } String keyTabFile = hiveConf.getVar(ConfVars.HIVE_SERVER2_KERBEROS_KEYTAB); if (keyTabFile.isEmpty()) { - throw new IOException( - "HiveServer2 Kerberos keytab is empty"); + throw new IOException("HiveServer2 Kerberos keytab is empty"); } - // Install the JAAS Configuration for the runtime ShimLoader.getHadoopShims().setZookeeperClientKerberosJaasConfig(principal, keyTabFile); - // Read all to the world - nodeAcls.addAll(Ids.READ_ACL_UNSAFE); - // Create/Delete/Write/Admin to the authenticated user - nodeAcls.add(new ACL(Perms.ALL, Ids.AUTH_IDS)); - } else { - // ACLs for znodes on a non-kerberized cluster - // Create/Read/Delete/Write/Admin to the world - nodeAcls.addAll(Ids.OPEN_ACL_UNSAFE); } } @@ -243,7 +253,7 @@ if ((thriftCLIService == null) || (thriftCLIService.getServerAddress() == null)) { throw new Exception("Unable to get the server address; it hasn't been initialized yet."); } - return thriftCLIService.getServerAddress().getHostName() + ":" + return thriftCLIService.getServerAddress().getHostAddress() + ":" + thriftCLIService.getPortNumber(); } @@ -344,24 +354,25 @@ */ static void deleteServerInstancesFromZooKeeper(String versionNumber) throws Exception { HiveConf hiveConf = new HiveConf(); - int zooKeeperSessionTimeout = - hiveConf.getIntVar(HiveConf.ConfVars.HIVE_ZOOKEEPER_SESSION_TIMEOUT); String zooKeeperEnsemble = ZooKeeperHiveHelper.getQuorumServers(hiveConf); String rootNamespace = hiveConf.getVar(HiveConf.ConfVars.HIVE_SERVER2_ZOOKEEPER_NAMESPACE); - ZooKeeper zooKeeperClient = - new ZooKeeper(zooKeeperEnsemble, zooKeeperSessionTimeout, - new ZooKeeperHiveHelper.DummyWatcher()); - // Get all znode paths + CuratorFramework zooKeeperClient = + CuratorFrameworkFactory.builder().connectString(zooKeeperEnsemble) + .retryPolicy(new ExponentialBackoffRetry(1000, 3)).build(); + zooKeeperClient.start(); List znodePaths = - zooKeeperClient.getChildren(ZooKeeperHiveHelper.ZOOKEEPER_PATH_SEPARATOR + rootNamespace, - false); + zooKeeperClient.getChildren().forPath( + ZooKeeperHiveHelper.ZOOKEEPER_PATH_SEPARATOR + rootNamespace); // Now for each path that is for the given versionNumber, delete the znode from ZooKeeper for (String znodePath : znodePaths) { if (znodePath.contains("version=" + versionNumber + ";")) { - zooKeeperClient.delete(ZooKeeperHiveHelper.ZOOKEEPER_PATH_SEPARATOR + rootNamespace - + ZooKeeperHiveHelper.ZOOKEEPER_PATH_SEPARATOR + znodePath, -1); + LOG.info("Removing the znode: " + znodePath + " from ZooKeeper"); + zooKeeperClient.delete().forPath( + ZooKeeperHiveHelper.ZOOKEEPER_PATH_SEPARATOR + rootNamespace + + ZooKeeperHiveHelper.ZOOKEEPER_PATH_SEPARATOR + znodePath); } } + zooKeeperClient.close(); } public static void main(String[] args) { @@ -516,8 +527,8 @@ } /** - * DeregisterOptionExecutor: executes the --deregister option by - * deregistering all HiveServer2 instances from ZooKeeper of a specific version. 
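deleteServerInstancesFromZooKeeper above lists the children of the root namespace and removes each znode whose name carries the requested version. The same getChildren/delete pattern in isolation (the connect string, namespace, and version filter are made up):

import java.util.List;

import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.CuratorFrameworkFactory;
import org.apache.curator.retry.ExponentialBackoffRetry;

// Illustrative sketch: enumerate a namespace's znodes with Curator and
// delete the ones matching a version marker, as --deregister does above.
public class ZnodeCleanupExample {
  public static void main(String[] args) throws Exception {
    CuratorFramework client = CuratorFrameworkFactory.builder()
        .connectString("localhost:2181")  // assumed ZooKeeper ensemble
        .retryPolicy(new ExponentialBackoffRetry(1000, 3))
        .build();
    client.start();
    List<String> znodes = client.getChildren().forPath("/hiveserver2-example");
    for (String znode : znodes) {
      if (znode.contains("version=0.15.0;")) {  // made-up version filter
        client.delete().forPath("/hiveserver2-example/" + znode);
      }
    }
    client.close();
  }
}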
+ * DeregisterOptionExecutor: executes the --deregister option by deregistering all HiveServer2 + * instances from ZooKeeper of a specific version. */ static class DeregisterOptionExecutor implements ServerOptionsExecutor { private final String versionNumber; @@ -539,4 +550,3 @@ } } } - Index: shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java =================================================================== --- shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java (revision 1637277) +++ shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java (working copy) @@ -76,9 +76,6 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.Progressable; import org.apache.hadoop.yarn.conf.YarnConfiguration; -import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.AllocationConfiguration; -import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler; -import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.QueuePlacementPolicy; import org.apache.tez.test.MiniTezCluster; import com.google.common.base.Joiner; @@ -90,7 +87,6 @@ * Implementation of shims against Hadoop 0.23.0. */ public class Hadoop23Shims extends HadoopShimsSecure { - private static final String MR2_JOB_QUEUE_PROPERTY = "mapreduce.job.queuename"; HadoopShims.MiniDFSShim cluster = null; @@ -230,22 +226,13 @@ */ @Override public void refreshDefaultQueue(Configuration conf, String userName) throws IOException { - String requestedQueue = YarnConfiguration.DEFAULT_QUEUE_NAME; if (StringUtils.isNotBlank(userName) && isFairScheduler(conf)) { - AllocationConfiguration allocConf = new AllocationConfiguration(conf); - QueuePlacementPolicy queuePolicy = allocConf.getPlacementPolicy(); - if (queuePolicy != null) { - requestedQueue = queuePolicy.assignAppToQueue(requestedQueue, userName); - if (StringUtils.isNotBlank(requestedQueue)) { - LOG.debug("Setting queue name to " + requestedQueue + " for user " + userName); - conf.set(MR2_JOB_QUEUE_PROPERTY, requestedQueue); - } - } + ShimLoader.getSchedulerShims().refreshDefaultQueue(conf, userName); } } private boolean isFairScheduler (Configuration conf) { - return FairScheduler.class.getName(). + return "org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler". equalsIgnoreCase(conf.get(YarnConfiguration.RM_SCHEDULER)); } Index: shims/aggregator/pom.xml =================================================================== --- shims/aggregator/pom.xml (revision 1637277) +++ shims/aggregator/pom.xml (working copy) @@ -63,5 +63,11 @@ <version>${project.version}</version> <scope>runtime</scope> + <dependency> + <groupId>org.apache.hive.shims</groupId> + <artifactId>hive-shims-scheduler</artifactId> + <version>${project.version}</version> + <scope>runtime</scope> + </dependency> Index: shims/common/src/main/java/org/apache/hadoop/hive/shims/SchedulerShim.java =================================================================== --- shims/common/src/main/java/org/apache/hadoop/hive/shims/SchedulerShim.java (revision 0) +++ shims/common/src/main/java/org/apache/hadoop/hive/shims/SchedulerShim.java (working copy) @@ -0,0 +1,37 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License.
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.shims; + +import java.io.IOException; + +import org.apache.hadoop.conf.Configuration; + +/** + * Shim for the fair scheduler. + * HiveServer2 uses the fair scheduler API to resolve the queue mapping for non-impersonation + * mode. This shim avoids a direct dependency of Hive on the YARN fair scheduler. + */ +public interface SchedulerShim { + /** + * Reset the default fair scheduler queue mapping for the end user. + * @param conf + * @param userName end user name + */ + public void refreshDefaultQueue(Configuration conf, String userName) + throws IOException; +} Index: shims/common/src/main/java/org/apache/hadoop/hive/shims/ShimLoader.java =================================================================== --- shims/common/src/main/java/org/apache/hadoop/hive/shims/ShimLoader.java (revision 1637277) +++ shims/common/src/main/java/org/apache/hadoop/hive/shims/ShimLoader.java (working copy) @@ -20,6 +20,7 @@ import java.util.HashMap; import java.util.Map; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.thrift.HadoopThriftAuthBridge; import org.apache.hadoop.util.VersionInfo; import org.apache.log4j.AppenderSkeleton; @@ -33,6 +34,7 @@ private static JettyShims jettyShims; private static AppenderSkeleton eventCounter; private static HadoopThriftAuthBridge hadoopThriftAuthBridge; + private static SchedulerShim schedulerShim; /** * The names of the classes for shimming Hadoop for each major version. @@ -87,6 +89,9 @@ } + private static final String SCHEDULER_SHIM_CLASS = + "org.apache.hadoop.hive.schshim.FairSchedulerShim"; + /** * Factory method to get an instance of HadoopShims based on the * version of Hadoop on the classpath. @@ -124,6 +129,13 @@ return hadoopThriftAuthBridge; } + public static synchronized SchedulerShim getSchedulerShims() { + if (schedulerShim == null) { + schedulerShim = createShim(SCHEDULER_SHIM_CLASS, SchedulerShim.class); + } + return schedulerShim; + } + private static <T> T loadShims(Map<String, String> classMap, Class<T> xface) { String vers = getMajorVersion(); String className = classMap.get(vers); Index: shims/common-secure/src/main/java/org/apache/hadoop/hive/shims/HadoopShimsSecure.java =================================================================== --- shims/common-secure/src/main/java/org/apache/hadoop/hive/shims/HadoopShimsSecure.java (revision 1637277) +++ shims/common-secure/src/main/java/org/apache/hadoop/hive/shims/HadoopShimsSecure.java (working copy) @@ -463,6 +463,16 @@ @Override public UserGroupInformation getUGIForConf(Configuration conf) throws IOException { + String doAs = System.getenv("HADOOP_USER_NAME"); + if (doAs != null && doAs.length() > 0) { + /* + * this allows doAs (proxy user) to be passed along across process boundary where + * delegation tokens are not supported.
For example, a DDL stmt via WebHCat with + * a doAs parameter, forks to 'hcat' which needs to start a Session that + * proxies the end user + */ + return UserGroupInformation.createProxyUser(doAs, UserGroupInformation.getLoginUser()); + } return UserGroupInformation.getCurrentUser(); } Index: shims/common-secure/src/main/java/org/apache/hadoop/hive/thrift/ZooKeeperTokenStore.java =================================================================== --- shims/common-secure/src/main/java/org/apache/hadoop/hive/thrift/ZooKeeperTokenStore.java (revision 1637277) +++ shims/common-secure/src/main/java/org/apache/hadoop/hive/thrift/ZooKeeperTokenStore.java (working copy) @@ -62,8 +62,7 @@ private String rootNode = ""; private volatile CuratorFramework zkSession; private String zkConnectString; - private final int zkSessionTimeout = 3000; - private int connectTimeoutMillis = -1; + private int connectTimeoutMillis; private List newNodeAcl = Arrays.asList(new ACL(Perms.ALL, Ids.AUTH_IDS)); /** @@ -101,10 +100,10 @@ if (zkSession == null || zkSession.getState() == CuratorFrameworkState.STOPPED) { synchronized (this) { if (zkSession == null || zkSession.getState() == CuratorFrameworkState.STOPPED) { - zkSession = CuratorFrameworkFactory.builder().connectString(zkConnectString) - .sessionTimeoutMs(zkSessionTimeout).connectionTimeoutMs(connectTimeoutMillis) - .aclProvider(aclDefaultProvider) - .retryPolicy(new ExponentialBackoffRetry(1000, 3)).build(); + zkSession = + CuratorFrameworkFactory.builder().connectString(zkConnectString) + .connectionTimeoutMs(connectTimeoutMillis).aclProvider(aclDefaultProvider) + .retryPolicy(new ExponentialBackoffRetry(1000, 3)).build(); zkSession.start(); } } @@ -431,12 +430,14 @@ @Override public void init(Object objectStore, ServerMode smode) { this.serverMode = smode; - zkConnectString = conf.get( - HadoopThriftAuthBridge20S.Server.DELEGATION_TOKEN_STORE_ZK_CONNECT_STR, null); + zkConnectString = + conf.get(HadoopThriftAuthBridge20S.Server.DELEGATION_TOKEN_STORE_ZK_CONNECT_STR, null); if (zkConnectString == null || zkConnectString.trim().isEmpty()) { // try alternate config param - zkConnectString = conf.get( - HadoopThriftAuthBridge20S.Server.DELEGATION_TOKEN_STORE_ZK_CONNECT_STR_ALTERNATE, null); + zkConnectString = + conf.get( + HadoopThriftAuthBridge20S.Server.DELEGATION_TOKEN_STORE_ZK_CONNECT_STR_ALTERNATE, + null); if (zkConnectString == null || zkConnectString.trim().isEmpty()) { throw new IllegalArgumentException("Zookeeper connect string has to be specifed through " + "either " + HadoopThriftAuthBridge20S.Server.DELEGATION_TOKEN_STORE_ZK_CONNECT_STR @@ -445,14 +446,17 @@ + WHEN_ZK_DSTORE_MSG); } } - connectTimeoutMillis = conf.getInt( - HadoopThriftAuthBridge20S.Server.DELEGATION_TOKEN_STORE_ZK_CONNECT_TIMEOUTMILLIS, -1); + connectTimeoutMillis = + conf.getInt( + HadoopThriftAuthBridge20S.Server.DELEGATION_TOKEN_STORE_ZK_CONNECT_TIMEOUTMILLIS, + CuratorFrameworkFactory.builder().getConnectionTimeoutMs()); String aclStr = conf.get(HadoopThriftAuthBridge20S.Server.DELEGATION_TOKEN_STORE_ZK_ACL, null); if (StringUtils.isNotBlank(aclStr)) { this.newNodeAcl = parseACLs(aclStr); } - rootNode = conf.get(HadoopThriftAuthBridge20S.Server.DELEGATION_TOKEN_STORE_ZK_ZNODE, - HadoopThriftAuthBridge20S.Server.DELEGATION_TOKEN_STORE_ZK_ZNODE_DEFAULT) + serverMode; + rootNode = + conf.get(HadoopThriftAuthBridge20S.Server.DELEGATION_TOKEN_STORE_ZK_ZNODE, + HadoopThriftAuthBridge20S.Server.DELEGATION_TOKEN_STORE_ZK_ZNODE_DEFAULT) + serverMode; try { // Install the JAAS 
Index: shims/pom.xml
===================================================================
--- shims/pom.xml (revision 1637277)
+++ shims/pom.xml (working copy)
@@ -37,6 +37,7 @@
     <module>common-secure</module>
     <module>0.20S</module>
     <module>0.23</module>
+    <module>scheduler</module>
     <module>aggregator</module>
Index: shims/scheduler/pom.xml
===================================================================
--- shims/scheduler/pom.xml (revision 0)
+++ shims/scheduler/pom.xml (working copy)
@@ -0,0 +1,93 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+  <modelVersion>4.0.0</modelVersion>
+  <parent>
+    <groupId>org.apache.hive</groupId>
+    <artifactId>hive</artifactId>
+    <version>0.15.0-SNAPSHOT</version>
+    <relativePath>../../pom.xml</relativePath>
+  </parent>
+
+  <groupId>org.apache.hive.shims</groupId>
+  <artifactId>hive-shims-scheduler</artifactId>
+  <packaging>jar</packaging>
+  <name>Hive Shims Scheduler</name>
+
+  <properties>
+    <hive.path.to.root>../..</hive.path.to.root>
+  </properties>
+
+  <dependencies>
+    <dependency>
+      <groupId>org.apache.hive.shims</groupId>
+      <artifactId>hive-shims-common-secure</artifactId>
+      <version>${project.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>commons-logging</groupId>
+      <artifactId>commons-logging</artifactId>
+      <version>${commons-logging.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-common</artifactId>
+      <version>${hadoop-23.version}</version>
+      <optional>true</optional>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-mapreduce-client-core</artifactId>
+      <version>${hadoop-23.version}</version>
+      <optional>true</optional>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-yarn-api</artifactId>
+      <version>${hadoop-23.version}</version>
+      <optional>true</optional>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-yarn-common</artifactId>
+      <version>${hadoop-23.version}</version>
+      <optional>true</optional>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-yarn-client</artifactId>
+      <version>${hadoop-23.version}</version>
+      <optional>true</optional>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-yarn-server-resourcemanager</artifactId>
+      <version>${hadoop-23.version}</version>
+      <optional>true</optional>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-yarn-server-tests</artifactId>
+      <version>${hadoop-23.version}</version>
+      <optional>true</optional>
+      <type>test-jar</type>
+    </dependency>
+  </dependencies>
+</project>
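The separate module keeps the YARN resourcemanager jars optional: nothing in hive-shims-common links against FairSchedulerShim at compile time, and ShimLoader reaches it only by class name. A sketch of that decoupling pattern follows; the class name ReflectiveShimLoadSketch is hypothetical, and it assumes createShim amounts to a plain reflective instantiation, which the hunks above do not show.

import org.apache.hadoop.hive.shims.SchedulerShim;

public class ReflectiveShimLoadSketch {
  public static void main(String[] args) throws Exception {
    // Compile-time code references only the SchedulerShim interface; the
    // concrete class is resolved at runtime, so its YARN dependencies are
    // needed only when fair-scheduler queue mapping is actually used.
    SchedulerShim shim =
        Class.forName("org.apache.hadoop.hive.schshim.FairSchedulerShim")
            .asSubclass(SchedulerShim.class)
            .newInstance();
    System.out.println("loaded " + shim.getClass().getName());
  }
}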
Index: shims/scheduler/src/main/java/org/apache/hadoop/hive/schshim/FairSchedulerShim.java
===================================================================
--- shims/scheduler/src/main/java/org/apache/hadoop/hive/schshim/FairSchedulerShim.java (revision 0)
+++ shims/scheduler/src/main/java/org/apache/hadoop/hive/schshim/FairSchedulerShim.java (working copy)
@@ -0,0 +1,70 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.schshim;
+
+import java.io.IOException;
+import java.util.concurrent.atomic.AtomicReference;
+
+import org.apache.commons.lang.StringUtils;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.shims.SchedulerShim;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.AllocationConfiguration;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.AllocationFileLoaderService;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.QueuePlacementPolicy;
+
+public class FairSchedulerShim implements SchedulerShim {
+  private static final Log LOG = LogFactory.getLog(FairSchedulerShim.class);
+  private static final String MR2_JOB_QUEUE_PROPERTY = "mapreduce.job.queuename";
+
+  @Override
+  public void refreshDefaultQueue(Configuration conf, String userName)
+      throws IOException {
+    String requestedQueue = YarnConfiguration.DEFAULT_QUEUE_NAME;
+    final AtomicReference<AllocationConfiguration> allocConf =
+        new AtomicReference<AllocationConfiguration>();
+
+    AllocationFileLoaderService allocsLoader = new AllocationFileLoaderService();
+    allocsLoader.init(conf);
+    allocsLoader.setReloadListener(new AllocationFileLoaderService.Listener() {
+      @Override
+      public void onReload(AllocationConfiguration allocs) {
+        allocConf.set(allocs);
+      }
+    });
+    try {
+      allocsLoader.reloadAllocations();
+    } catch (Exception ex) {
+      throw new IOException("Failed to load queue allocations", ex);
+    }
+    if (allocConf.get() == null) {
+      allocConf.set(new AllocationConfiguration(conf));
+    }
+    QueuePlacementPolicy queuePolicy = allocConf.get().getPlacementPolicy();
+    if (queuePolicy != null) {
+      requestedQueue = queuePolicy.assignAppToQueue(requestedQueue, userName);
+      if (StringUtils.isNotBlank(requestedQueue)) {
+        LOG.debug("Setting queue name to " + requestedQueue + " for user "
+            + userName);
+        conf.set(MR2_JOB_QUEUE_PROPERTY, requestedQueue);
+      }
+    }
+  }
+}
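Putting it together, a hedged end-to-end sketch: with a fair-scheduler allocation file whose placement policy maps each user to root.<user>, refreshDefaultQueue() rewrites mapreduce.job.queuename accordingly. The class name FairSchedulerShimDemo, the allocation file path, and the placement rule are assumptions for illustration, not part of the patch.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.schshim.FairSchedulerShim;

public class FairSchedulerShimDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Assumed path; YARN's fair scheduler reads its placement rules from here.
    conf.set("yarn.scheduler.fair.allocation.file",
        "/etc/hadoop/conf/fair-scheduler.xml");
    new FairSchedulerShim().refreshDefaultQueue(conf, "alice");
    // With a user-based placement rule this prints e.g. root.alice.
    System.out.println(conf.get("mapreduce.job.queuename"));
  }
}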